
Revert "Upgrade V8 to 2.0.5"

This reverts commit 20b945df70.

Broken on Hagen's Macintosh. Don't have time to investigate.
v0.7.4-release
Ryan Dahl, 15 years ago
commit a98afdfb2f
deps/v8/.gitignore | 1
deps/v8/AUTHORS | 3
deps/v8/ChangeLog | 50
deps/v8/SConstruct | 19
deps/v8/include/v8.h | 26
deps/v8/src/SConscript | 7
deps/v8/src/api.cc | 94
deps/v8/src/arm/assembler-arm.cc | 186
deps/v8/src/arm/assembler-arm.h | 221
deps/v8/src/arm/assembler-thumb2-inl.h | 267
deps/v8/src/arm/assembler-thumb2.cc | 1821
deps/v8/src/arm/assembler-thumb2.h | 1027
deps/v8/src/arm/codegen-arm.cc | 92
deps/v8/src/arm/codegen-arm.h | 21
deps/v8/src/arm/disasm-arm.cc | 21
deps/v8/src/arm/fast-codegen-arm.cc | 350
deps/v8/src/arm/frames-arm.cc | 4
deps/v8/src/arm/ic-arm.cc | 10
deps/v8/src/arm/macro-assembler-arm.cc | 30
deps/v8/src/arm/macro-assembler-arm.h | 6
deps/v8/src/arm/simulator-arm.cc | 20
deps/v8/src/arm/stub-cache-arm.cc | 47
deps/v8/src/arm/virtual-frame-arm.cc | 3
deps/v8/src/arm/virtual-frame-arm.h | 1
deps/v8/src/assembler.cc | 10
deps/v8/src/assembler.h | 4
deps/v8/src/ast.h | 17
deps/v8/src/bootstrapper.cc | 26
deps/v8/src/bootstrapper.h | 4
deps/v8/src/code-stubs.cc | 143
deps/v8/src/code-stubs.h | 19
deps/v8/src/codegen.h | 49
deps/v8/src/compiler.cc | 78
deps/v8/src/execution.cc | 6
deps/v8/src/factory.cc | 15
deps/v8/src/factory.h | 6
deps/v8/src/fast-codegen.cc | 308
deps/v8/src/fast-codegen.h | 195
deps/v8/src/global-handles.cc | 17
deps/v8/src/globals.h | 19
deps/v8/src/heap-inl.h | 69
deps/v8/src/heap-profiler.cc | 5
deps/v8/src/heap-profiler.h | 6
deps/v8/src/heap.cc | 159
deps/v8/src/heap.h | 69
deps/v8/src/ia32/assembler-ia32.cc | 11
deps/v8/src/ia32/assembler-ia32.h | 1
deps/v8/src/ia32/builtins-ia32.cc | 47
deps/v8/src/ia32/codegen-ia32.cc | 458
deps/v8/src/ia32/codegen-ia32.h | 25
deps/v8/src/ia32/disasm-ia32.cc | 8
deps/v8/src/ia32/fast-codegen-ia32.cc | 381
deps/v8/src/ia32/ic-ia32.cc | 165
deps/v8/src/ia32/macro-assembler-ia32.cc | 48
deps/v8/src/ia32/macro-assembler-ia32.h | 22
deps/v8/src/ia32/stub-cache-ia32.cc | 143
deps/v8/src/ia32/virtual-frame-ia32.cc | 11
deps/v8/src/ia32/virtual-frame-ia32.h | 6
deps/v8/src/ic.cc | 22
deps/v8/src/ic.h | 24
deps/v8/src/macro-assembler.h | 5
deps/v8/src/mark-compact.cc | 74
deps/v8/src/math.js | 35
deps/v8/src/messages.js | 5
deps/v8/src/objects-inl.h | 12
deps/v8/src/objects.cc | 14
deps/v8/src/objects.h | 47
deps/v8/src/parser.cc | 3
deps/v8/src/prettyprinter.cc | 10
deps/v8/src/prettyprinter.h | 2
deps/v8/src/rewriter.cc | 2
deps/v8/src/runtime.cc | 105
deps/v8/src/runtime.h | 4
deps/v8/src/runtime.js | 6
deps/v8/src/scopes.cc | 3
deps/v8/src/scopes.h | 11
deps/v8/src/serialize.cc | 22
deps/v8/src/spaces.cc | 4
deps/v8/src/spaces.h | 27
deps/v8/src/stub-cache.cc | 4
deps/v8/src/stub-cache.h | 11
deps/v8/src/token.cc | 2
deps/v8/src/token.h | 9
deps/v8/src/v8-counters.h | 2
deps/v8/src/v8natives.js | 207
deps/v8/src/variables.cc | 4
deps/v8/src/variables.h | 14
deps/v8/src/version.cc | 2
deps/v8/src/x64/codegen-x64.cc | 276
deps/v8/src/x64/codegen-x64.h | 38
deps/v8/src/x64/fast-codegen-x64.cc | 354
deps/v8/src/x64/ic-x64.cc | 103
deps/v8/src/x64/macro-assembler-x64.cc | 125
deps/v8/src/x64/macro-assembler-x64.h | 35
deps/v8/src/x64/stub-cache-x64.cc | 47
deps/v8/test/cctest/test-api.cc | 34
deps/v8/test/cctest/test-debug.cc | 33
deps/v8/test/cctest/test-macro-assembler-x64.cc | 12
deps/v8/test/mjsunit/compiler/thisfunction.js | 35
deps/v8/test/mjsunit/fuzz-natives.js | 1

1
deps/v8/.gitignore

@ -14,7 +14,6 @@
*.pdb
#*#
*~
.cpplint-cache
d8
d8_g
shell

3
deps/v8/AUTHORS

@ -13,11 +13,10 @@ Daniel James <dnljms@gmail.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
Joel Stanley <joel.stan@gmail.com>
John Jozwiak <jjozwiak@codeaurora.org>
Matt Hanselman <mjhanselman@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Rafal Krypa <rafal@krypa.net>
Rene Rebe <rene@exactcode.de>
Ryan Dahl <coldredlemur@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Subrato K De <subratokde@codeaurora.org>
John Jozwiak <jjozwiak@codeaurora.org>

50
deps/v8/ChangeLog

@ -1,35 +1,3 @@
2009-12-18: Version 2.0.5
Extended to upper limit of map space to allow for 7 times as many map
to be allocated (issue 524).
Improved performance of code using closures.
Improved performance of some binary operations involving doubles.
2009-12-16: Version 2.0.4
Added ECMAScript 5 Object.create.
Improved performance of Math.max and Math.min.
Optimized adding of strings on 64-bit platforms.
Improved handling of external strings by using a separate table
instead of weak handles. This improves garbage collection
performance and uses less memory.
Changed code generation for object and array literals in toplevel
code to be more compact by doing more work in the runtime.
Fixed a crash bug triggered when garbage collection happened during
generation of a callback load inline cache stub.
Fixed crash bug sometimes triggered when local variables shadowed
parameters in functions that used the arguments object.
2009-12-03: Version 2.0.3
Optimized handling and adding of strings, for-in and Array.join.
@ -67,7 +35,7 @@
Reverted a change which caused Chromium interactive ui test
failures.
2009-11-18: Version 2.0.0
Added support for VFP on ARM.
@ -112,7 +80,7 @@
2009-10-16: Version 1.3.16
X64: Convert smis to holding 32 bits of payload.
Introduce v8::Integer::NewFromUnsigned method.
@ -257,7 +225,7 @@
notifications when V8 has not yet been initialized.
Fixed ARM simulator compilation problem on Windows.
2009-08-25: Version 1.3.7
@ -372,9 +340,9 @@
function is a built-in.
Initial implementation of constructor heap profile for JS objects.
More fine grained control of profiling aspects through the API.
Optimized the called as constructor check for API calls.
@ -399,8 +367,8 @@
Added an external allocation limit to avoid issues where small V8
objects would hold on to large amounts of external memory without
causing garbage collections.
Finished more of the inline caching stubs for x64 targets.
Finished more of the inline caching stubs for x64 targets.
2009-07-13: Version 1.2.14
@ -480,9 +448,9 @@
Fixed a bug in the string type inference.
Fixed a bug in the handling of 'constant function' properties.
Improved overall performance.
2009-06-16: Version 1.2.8

19
deps/v8/SConstruct

@ -143,9 +143,6 @@ LIBRARY_FLAGS = {
},
'os:macos': {
'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
'library:shared': {
'CPPDEFINES': ['V8_SHARED']
}
},
'os:freebsd': {
'CPPPATH' : ['/usr/local/include'],
@ -181,12 +178,6 @@ LIBRARY_FLAGS = {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'armvariant:thumb2': {
'CPPDEFINES': ['V8_ARM_VARIANT_THUMB']
},
'armvariant:arm': {
'CPPDEFINES': ['V8_ARM_VARIANT_ARM']
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
'CCFLAGS': ['-m64'],
@ -252,7 +243,6 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
'-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
@ -665,11 +655,6 @@ SIMPLE_OPTIONS = {
'values': ['default', 'hidden'],
'default': 'hidden',
'help': 'shared library symbol visibility'
},
'armvariant': {
'values': ['arm', 'thumb2', 'none'],
'default': 'none',
'help': 'generate thumb2 instructions instead of arm instructions (default)'
}
}
@ -853,10 +838,6 @@ def PostprocessOptions(options):
# Print a warning if profiling is enabled without profiling support
print "Warning: forcing profilingsupport on when prof is on"
options['profilingsupport'] = 'on'
if (options['armvariant'] == 'none' and options['arch'] == 'arm'):
options['armvariant'] = 'arm'
if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
options['armvariant'] = 'none'
def ParseEnvOverrides(arg, imports):

26
deps/v8/include/v8.h

@ -833,26 +833,13 @@ class V8EXPORT String : public Primitive {
* Returns true if the string is both external and ascii
*/
bool IsExternalAscii() const;
class V8EXPORT ExternalStringResourceBase {
public:
virtual ~ExternalStringResourceBase() {}
protected:
ExternalStringResourceBase() {}
private:
// Disallow copying and assigning.
ExternalStringResourceBase(const ExternalStringResourceBase&);
void operator=(const ExternalStringResourceBase&);
};
/**
* An ExternalStringResource is a wrapper around a two-byte string
* buffer that resides outside V8's heap. Implement an
* ExternalStringResource to manage the life cycle of the underlying
* buffer. Note that the string data must be immutable.
*/
class V8EXPORT ExternalStringResource
: public ExternalStringResourceBase {
class V8EXPORT ExternalStringResource { // NOLINT
public:
/**
* Override the destructor to manage the life cycle of the underlying
@ -865,6 +852,10 @@ class V8EXPORT String : public Primitive {
virtual size_t length() const = 0;
protected:
ExternalStringResource() {}
private:
// Disallow copying and assigning.
ExternalStringResource(const ExternalStringResource&);
void operator=(const ExternalStringResource&);
};
/**
@ -878,8 +869,7 @@ class V8EXPORT String : public Primitive {
* Use String::New or convert to 16 bit data for non-ASCII.
*/
class V8EXPORT ExternalAsciiStringResource
: public ExternalStringResourceBase {
class V8EXPORT ExternalAsciiStringResource { // NOLINT
public:
/**
* Override the destructor to manage the life cycle of the underlying
@ -892,6 +882,10 @@ class V8EXPORT String : public Primitive {
virtual size_t length() const = 0;
protected:
ExternalAsciiStringResource() {}
private:
// Disallow copying and assigning.
ExternalAsciiStringResource(const ExternalAsciiStringResource&);
void operator=(const ExternalAsciiStringResource&);
};
/**

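Editor's note: the v8.h hunk above reshapes the external-string resource classes that embedders subclass. For reference, a minimal embedder-side resource against this 2.0-era (pre-Isolate) API might look like the sketch below; the class name, buffer handling, and usage comment are illustrative and not part of the diff.

  // Illustrative only: a minimal ASCII resource for the 2.0-era API shown
  // above. V8 takes ownership of the resource and calls its destructor once
  // the externalized string is no longer strongly referenced.
  #include <cstring>
  #include <v8.h>

  class StaticAsciiResource : public v8::String::ExternalAsciiStringResource {
   public:
    explicit StaticAsciiResource(const char* buffer)
        : data_(buffer), length_(std::strlen(buffer)) {}
    // The buffer must stay valid and immutable for the resource's lifetime.
    const char* data() const { return data_; }
    size_t length() const { return length_; }
   private:
    const char* data_;
    size_t length_;
  };

  // Usage (assumes V8 is initialized and a context is entered):
  //   v8::HandleScope scope;
  //   v8::Local<v8::String> s =
  //       v8::String::NewExternal(new StaticAsciiResource("hello"));
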
7
deps/v8/src/SConscript

@ -106,6 +106,7 @@ SOURCES = {
zone.cc
"""),
'arch:arm': Split("""
arm/assembler-arm.cc
arm/builtins-arm.cc
arm/codegen-arm.cc
arm/constants-arm.cc
@ -122,12 +123,6 @@ SOURCES = {
arm/stub-cache-arm.cc
arm/virtual-frame-arm.cc
"""),
'armvariant:arm': Split("""
arm/assembler-arm.cc
"""),
'armvariant:thumb2': Split("""
arm/assembler-thumb2.cc
"""),
'arch:ia32': Split("""
ia32/assembler-ia32.cc
ia32/builtins-ia32.cc

94
deps/v8/src/api.cc

@ -3082,13 +3082,81 @@ i::Handle<i::String> NewExternalAsciiStringHandle(
}
static void DisposeExternalString(v8::Persistent<v8::Value> obj,
void* parameter) {
ENTER_V8;
i::ExternalTwoByteString* str =
i::ExternalTwoByteString::cast(*Utils::OpenHandle(*obj));
// External symbols are deleted when they are pruned out of the symbol
// table. Generally external symbols are not registered with the weak handle
// callbacks unless they are upgraded to a symbol after being externalized.
if (!str->IsSymbol()) {
v8::String::ExternalStringResource* resource =
reinterpret_cast<v8::String::ExternalStringResource*>(parameter);
if (resource != NULL) {
const int total_size =
static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Decrement(total_size);
// The object will continue to live in the JavaScript heap until the
// handle is entirely cleaned out by the next GC. For example the
// destructor for the resource below could bring it back to life again.
// Which is why we make sure to not have a dangling pointer here.
str->set_resource(NULL);
delete resource;
}
}
// In any case we do not need this handle any longer.
obj.Dispose();
}
static void DisposeExternalAsciiString(v8::Persistent<v8::Value> obj,
void* parameter) {
ENTER_V8;
i::ExternalAsciiString* str =
i::ExternalAsciiString::cast(*Utils::OpenHandle(*obj));
// External symbols are deleted when they are pruned out of the symbol
// table. Generally external symbols are not registered with the weak handle
// callbacks unless they are upgraded to a symbol after being externalized.
if (!str->IsSymbol()) {
v8::String::ExternalAsciiStringResource* resource =
reinterpret_cast<v8::String::ExternalAsciiStringResource*>(parameter);
if (resource != NULL) {
const int total_size =
static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Decrement(total_size);
// The object will continue to live in the JavaScript heap until the
// handle is entirely cleaned out by the next GC. For example the
// destructor for the resource below could bring it back to life again.
// Which is why we make sure to not have a dangling pointer here.
str->set_resource(NULL);
delete resource;
}
}
// In any case we do not need this handle any longer.
obj.Dispose();
}
Local<String> v8::String::NewExternal(
v8::String::ExternalStringResource* resource) {
EnsureInitialized("v8::String::NewExternal()");
LOG_API("String::NewExternal");
ENTER_V8;
const int total_size =
static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Increment(total_size);
i::Handle<i::String> result = NewExternalStringHandle(resource);
i::ExternalStringTable::AddString(*result);
i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
i::GlobalHandles::MakeWeak(handle.location(),
resource,
&DisposeExternalString);
return Utils::ToLocal(result);
}
@ -3100,7 +3168,13 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
i::ExternalStringTable::AddString(*obj);
// Operation was successful and the string is not a symbol. In this case
we need to make sure that we call the destructor for the external
// resource when no strong references to the string remain.
i::Handle<i::Object> handle = i::GlobalHandles::Create(*obj);
i::GlobalHandles::MakeWeak(handle.location(),
resource,
&DisposeExternalString);
}
return result;
}
@ -3111,8 +3185,14 @@ Local<String> v8::String::NewExternal(
EnsureInitialized("v8::String::NewExternal()");
LOG_API("String::NewExternal");
ENTER_V8;
const int total_size =
static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Increment(total_size);
i::Handle<i::String> result = NewExternalAsciiStringHandle(resource);
i::ExternalStringTable::AddString(*result);
i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
i::GlobalHandles::MakeWeak(handle.location(),
resource,
&DisposeExternalAsciiString);
return Utils::ToLocal(result);
}
@ -3125,7 +3205,13 @@ bool v8::String::MakeExternal(
i::Handle<i::String> obj = Utils::OpenHandle(this);
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
i::ExternalStringTable::AddString(*obj);
// Operation was successful and the string is not a symbol. In this case
we need to make sure that we call the destructor for the external
// resource when no strong references to the string remain.
i::Handle<i::Object> handle = i::GlobalHandles::Create(*obj);
i::GlobalHandles::MakeWeak(handle.location(),
resource,
&DisposeExternalAsciiString);
}
return result;
}

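Editor's note: the api.cc hunks above restore weak-handle registration, so V8 itself deletes the external resource (via DisposeExternalString / DisposeExternalAsciiString) once the string is collected. From the embedder's side the ownership rule can be sketched as below; the helper name is hypothetical and the snippet assumes the 2.0-era API.

  // Illustrative only: externalize an existing string in place. On success
  // V8 owns the resource and the dispose callbacks restored above delete it
  // when the string dies; on failure the caller keeps ownership.
  #include <v8.h>

  bool ExternalizeOrFree(v8::Handle<v8::String> str,
                         v8::String::ExternalAsciiStringResource* resource) {
    if (str->MakeExternal(resource)) return true;
    delete resource;  // V8 did not take ownership.
    return false;
  }
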
186
deps/v8/src/arm/assembler-arm.cc

@ -114,55 +114,55 @@ CRegister cr15 = { 15 };
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "sN:sM" is the same as "dN/2".
SwVfpRegister s0 = { 0 };
SwVfpRegister s1 = { 1 };
SwVfpRegister s2 = { 2 };
SwVfpRegister s3 = { 3 };
SwVfpRegister s4 = { 4 };
SwVfpRegister s5 = { 5 };
SwVfpRegister s6 = { 6 };
SwVfpRegister s7 = { 7 };
SwVfpRegister s8 = { 8 };
SwVfpRegister s9 = { 9 };
SwVfpRegister s10 = { 10 };
SwVfpRegister s11 = { 11 };
SwVfpRegister s12 = { 12 };
SwVfpRegister s13 = { 13 };
SwVfpRegister s14 = { 14 };
SwVfpRegister s15 = { 15 };
SwVfpRegister s16 = { 16 };
SwVfpRegister s17 = { 17 };
SwVfpRegister s18 = { 18 };
SwVfpRegister s19 = { 19 };
SwVfpRegister s20 = { 20 };
SwVfpRegister s21 = { 21 };
SwVfpRegister s22 = { 22 };
SwVfpRegister s23 = { 23 };
SwVfpRegister s24 = { 24 };
SwVfpRegister s25 = { 25 };
SwVfpRegister s26 = { 26 };
SwVfpRegister s27 = { 27 };
SwVfpRegister s28 = { 28 };
SwVfpRegister s29 = { 29 };
SwVfpRegister s30 = { 30 };
SwVfpRegister s31 = { 31 };
DwVfpRegister d0 = { 0 };
DwVfpRegister d1 = { 1 };
DwVfpRegister d2 = { 2 };
DwVfpRegister d3 = { 3 };
DwVfpRegister d4 = { 4 };
DwVfpRegister d5 = { 5 };
DwVfpRegister d6 = { 6 };
DwVfpRegister d7 = { 7 };
DwVfpRegister d8 = { 8 };
DwVfpRegister d9 = { 9 };
DwVfpRegister d10 = { 10 };
DwVfpRegister d11 = { 11 };
DwVfpRegister d12 = { 12 };
DwVfpRegister d13 = { 13 };
DwVfpRegister d14 = { 14 };
DwVfpRegister d15 = { 15 };
Register s0 = { 0 };
Register s1 = { 1 };
Register s2 = { 2 };
Register s3 = { 3 };
Register s4 = { 4 };
Register s5 = { 5 };
Register s6 = { 6 };
Register s7 = { 7 };
Register s8 = { 8 };
Register s9 = { 9 };
Register s10 = { 10 };
Register s11 = { 11 };
Register s12 = { 12 };
Register s13 = { 13 };
Register s14 = { 14 };
Register s15 = { 15 };
Register s16 = { 16 };
Register s17 = { 17 };
Register s18 = { 18 };
Register s19 = { 19 };
Register s20 = { 20 };
Register s21 = { 21 };
Register s22 = { 22 };
Register s23 = { 23 };
Register s24 = { 24 };
Register s25 = { 25 };
Register s26 = { 26 };
Register s27 = { 27 };
Register s28 = { 28 };
Register s29 = { 29 };
Register s30 = { 30 };
Register s31 = { 31 };
Register d0 = { 0 };
Register d1 = { 1 };
Register d2 = { 2 };
Register d3 = { 3 };
Register d4 = { 4 };
Register d5 = { 5 };
Register d6 = { 6 };
Register d7 = { 7 };
Register d8 = { 8 };
Register d9 = { 9 };
Register d10 = { 10 };
Register d11 = { 11 };
Register d12 = { 12 };
Register d13 = { 13 };
Register d14 = { 14 };
Register d15 = { 15 };
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@ -1371,10 +1371,11 @@ void Assembler::stc2(Coprocessor coproc,
// Support for VFP.
void Assembler::vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
const Condition cond) {
void Assembler::fmdrr(const Register dst,
const Register src1,
const Register src2,
const SBit s,
const Condition cond) {
// Dm = <Rt,Rt2>.
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
@ -1386,10 +1387,11 @@ void Assembler::vmov(const DwVfpRegister dst,
}
void Assembler::vmov(const Register dst1,
const Register dst2,
const DwVfpRegister src,
const Condition cond) {
void Assembler::fmrrd(const Register dst1,
const Register dst2,
const Register src,
const SBit s,
const Condition cond) {
// <Rt,Rt2> = Dm.
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
@ -1401,8 +1403,9 @@ void Assembler::vmov(const Register dst1,
}
void Assembler::vmov(const SwVfpRegister dst,
void Assembler::fmsr(const Register dst,
const Register src,
const SBit s,
const Condition cond) {
// Sn = Rt.
// Instruction details available in ARM DDI 0406A, A8-642.
@ -1415,8 +1418,9 @@ void Assembler::vmov(const SwVfpRegister dst,
}
void Assembler::vmov(const Register dst,
const SwVfpRegister src,
void Assembler::fmrs(const Register dst,
const Register src,
const SBit s,
const Condition cond) {
// Rt = Sn.
// Instruction details available in ARM DDI 0406A, A8-642.
@ -1429,9 +1433,10 @@ void Assembler::vmov(const Register dst,
}
void Assembler::vcvt(const DwVfpRegister dst,
const SwVfpRegister src,
const Condition cond) {
void Assembler::fsitod(const Register dst,
const Register src,
const SBit s,
const Condition cond) {
// Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
// Instruction details available in ARM DDI 0406A, A8-576.
// cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
@ -1443,9 +1448,10 @@ void Assembler::vcvt(const DwVfpRegister dst,
}
void Assembler::vcvt(const SwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
void Assembler::ftosid(const Register dst,
const Register src,
const SBit s,
const Condition cond) {
// Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
// Instruction details available in ARM DDI 0406A, A8-576.
// cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
@ -1457,11 +1463,12 @@ void Assembler::vcvt(const SwVfpRegister dst,
}
void Assembler::vadd(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
// Dd = vadd(Dn, Dm) double precision floating point addition.
void Assembler::faddd(const Register dst,
const Register src1,
const Register src2,
const SBit s,
const Condition cond) {
// Dd = faddd(Dn, Dm) double precision floating point addition.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-536.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
@ -1472,11 +1479,12 @@ void Assembler::vadd(const DwVfpRegister dst,
}
void Assembler::vsub(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
// Dd = vsub(Dn, Dm) double precision floating point subtraction.
void Assembler::fsubd(const Register dst,
const Register src1,
const Register src2,
const SBit s,
const Condition cond) {
// Dd = fsubd(Dn, Dm) double precision floating point subtraction.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
@ -1487,11 +1495,12 @@ void Assembler::vsub(const DwVfpRegister dst,
}
void Assembler::vmul(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
// Dd = vmul(Dn, Dm) double precision floating point multiplication.
void Assembler::fmuld(const Register dst,
const Register src1,
const Register src2,
const SBit s,
const Condition cond) {
// Dd = fmuld(Dn, Dm) double precision floating point multiplication.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
@ -1502,11 +1511,12 @@ void Assembler::vmul(const DwVfpRegister dst,
}
void Assembler::vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
// Dd = vdiv(Dn, Dm) double precision floating point division.
void Assembler::fdivd(const Register dst,
const Register src1,
const Register src2,
const SBit s,
const Condition cond) {
// Dd = fdivd(Dn, Dm) double precision floating point division.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-584.
// cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
@ -1517,8 +1527,8 @@ void Assembler::vdiv(const DwVfpRegister dst,
}
void Assembler::vcmp(const DwVfpRegister src1,
const DwVfpRegister src2,
void Assembler::fcmp(const Register src1,
const Register src2,
const SBit s,
const Condition cond) {
// vcmp(Dd, Dm) double precision floating point comparison.

221
deps/v8/src/arm/assembler-arm.h

@ -103,94 +103,57 @@ extern Register sp;
extern Register lr;
extern Register pc;
// Single word VFP register.
struct SwVfpRegister {
bool is_valid() const { return 0 <= code_ && code_ < 32; }
bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
return code_;
}
int bit() const {
ASSERT(is_valid());
return 1 << code_;
}
int code_;
};
// Double word VFP register.
struct DwVfpRegister {
// Supporting d0 to d15, can be later extended to d31.
bool is_valid() const { return 0 <= code_ && code_ < 16; }
bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
return code_;
}
int bit() const {
ASSERT(is_valid());
return 1 << code_;
}
int code_;
};
// Support for VFP registers s0 to s31 (d0 to d15).
// Note that "s(N):s(N+1)" is the same as "d(N/2)".
extern SwVfpRegister s0;
extern SwVfpRegister s1;
extern SwVfpRegister s2;
extern SwVfpRegister s3;
extern SwVfpRegister s4;
extern SwVfpRegister s5;
extern SwVfpRegister s6;
extern SwVfpRegister s7;
extern SwVfpRegister s8;
extern SwVfpRegister s9;
extern SwVfpRegister s10;
extern SwVfpRegister s11;
extern SwVfpRegister s12;
extern SwVfpRegister s13;
extern SwVfpRegister s14;
extern SwVfpRegister s15;
extern SwVfpRegister s16;
extern SwVfpRegister s17;
extern SwVfpRegister s18;
extern SwVfpRegister s19;
extern SwVfpRegister s20;
extern SwVfpRegister s21;
extern SwVfpRegister s22;
extern SwVfpRegister s23;
extern SwVfpRegister s24;
extern SwVfpRegister s25;
extern SwVfpRegister s26;
extern SwVfpRegister s27;
extern SwVfpRegister s28;
extern SwVfpRegister s29;
extern SwVfpRegister s30;
extern SwVfpRegister s31;
extern DwVfpRegister d0;
extern DwVfpRegister d1;
extern DwVfpRegister d2;
extern DwVfpRegister d3;
extern DwVfpRegister d4;
extern DwVfpRegister d5;
extern DwVfpRegister d6;
extern DwVfpRegister d7;
extern DwVfpRegister d8;
extern DwVfpRegister d9;
extern DwVfpRegister d10;
extern DwVfpRegister d11;
extern DwVfpRegister d12;
extern DwVfpRegister d13;
extern DwVfpRegister d14;
extern DwVfpRegister d15;
// Support for VFP registers s0 to s32 (d0 to d16).
// Note that "sN:sM" is the same as "dN/2".
extern Register s0;
extern Register s1;
extern Register s2;
extern Register s3;
extern Register s4;
extern Register s5;
extern Register s6;
extern Register s7;
extern Register s8;
extern Register s9;
extern Register s10;
extern Register s11;
extern Register s12;
extern Register s13;
extern Register s14;
extern Register s15;
extern Register s16;
extern Register s17;
extern Register s18;
extern Register s19;
extern Register s20;
extern Register s21;
extern Register s22;
extern Register s23;
extern Register s24;
extern Register s25;
extern Register s26;
extern Register s27;
extern Register s28;
extern Register s29;
extern Register s30;
extern Register s31;
extern Register d0;
extern Register d1;
extern Register d2;
extern Register d3;
extern Register d4;
extern Register d5;
extern Register d6;
extern Register d7;
extern Register d8;
extern Register d9;
extern Register d10;
extern Register d11;
extern Register d12;
extern Register d13;
extern Register d14;
extern Register d15;
// Coprocessor register
struct CRegister {
@ -796,45 +759,55 @@ class Assembler : public Malloced {
// However, some simple modifications can allow
// these APIs to support D16 to D31.
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
const Condition cond = al);
void vmov(const Register dst1,
const Register dst2,
const DwVfpRegister src,
const Condition cond = al);
void vmov(const SwVfpRegister dst,
void fmdrr(const Register dst,
const Register src1,
const Register src2,
const SBit s = LeaveCC,
const Condition cond = al);
void fmrrd(const Register dst1,
const Register dst2,
const Register src,
const SBit s = LeaveCC,
const Condition cond = al);
void fmsr(const Register dst,
const Register src,
const SBit s = LeaveCC,
const Condition cond = al);
void vmov(const Register dst,
const SwVfpRegister src,
const Condition cond = al);
void vcvt(const DwVfpRegister dst,
const SwVfpRegister src,
const Condition cond = al);
void vcvt(const SwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
void vadd(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vsub(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vmul(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
void fmrs(const Register dst,
const Register src,
const SBit s = LeaveCC,
const Condition cond = al);
void vcmp(const DwVfpRegister src1,
const DwVfpRegister src2,
void fsitod(const Register dst,
const Register src,
const SBit s = LeaveCC,
const Condition cond = al);
void ftosid(const Register dst,
const Register src,
const SBit s = LeaveCC,
const Condition cond = al);
void faddd(const Register dst,
const Register src1,
const Register src2,
const SBit s = LeaveCC,
const Condition cond = al);
void fsubd(const Register dst,
const Register src1,
const Register src2,
const SBit s = LeaveCC,
const Condition cond = al);
void fmuld(const Register dst,
const Register src1,
const Register src2,
const SBit s = LeaveCC,
const Condition cond = al);
void fdivd(const Register dst,
const Register src1,
const Register src2,
const SBit s = LeaveCC,
const Condition cond = al);
void fcmp(const Register src1,
const Register src2,
const SBit s = LeaveCC,
const Condition cond = al);
void vmrs(const Register dst,

267
deps/v8/src/arm/assembler-thumb2-inl.h

@ -1,267 +0,0 @@
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license above has been modified
// significantly by Google Inc.
// Copyright 2006-2008 the V8 project authors. All rights reserved.
#ifndef V8_ARM_ASSEMBLER_THUMB2_INL_H_
#define V8_ARM_ASSEMBLER_THUMB2_INL_H_
#include "arm/assembler-thumb2.h"
#include "cpu.h"
namespace v8 {
namespace internal {
Condition NegateCondition(Condition cc) {
ASSERT(cc != al);
return static_cast<Condition>(cc ^ ne);
}
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p += delta; // relocate entry
}
// We do not use pc relative addressing on ARM, so there is
// nothing else to do.
}
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
return Assembler::target_address_at(pc_);
}
Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
}
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
}
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_at(Assembler::target_address_address_at(pc_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
}
Object** RelocInfo::target_object_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
}
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
}
Address* RelocInfo::target_reference_address() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
}
Address RelocInfo::call_address() {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::set_call_address(Address target) {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
}
Object* RelocInfo::call_object() {
return *call_object_address();
}
Object** RelocInfo::call_object_address() {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::set_call_object(Object* target) {
*call_object_address() = target;
}
bool RelocInfo::IsPatchedReturnSequence() {
// On ARM a "call instruction" is actually two instructions.
// mov lr, pc
// ldr pc, [pc, #XXX]
return (Assembler::instr_at(pc_) == kMovLrPc)
&& ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
== kLdrPCPattern);
}
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm32_ = immediate;
rmode_ = rmode;
}
Operand::Operand(const char* s) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(s);
rmode_ = RelocInfo::EMBEDDED_STRING;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Object** opp) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(opp);
rmode_ = RelocInfo::NONE;
}
Operand::Operand(Context** cpp) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(cpp);
rmode_ = RelocInfo::NONE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE;
}
Operand::Operand(Register rm) {
rm_ = rm;
rs_ = no_reg;
shift_op_ = LSL;
shift_imm_ = 0;
}
bool Operand::is_reg() const {
return rm_.is_valid() &&
rs_.is(no_reg) &&
shift_op_ == LSL &&
shift_imm_ == 0;
}
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
if (pc_offset() >= next_buffer_check_) {
CheckConstPool(false, true);
}
}
void Assembler::emit(Instr x) {
CheckBuffer();
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
}
Address Assembler::target_address_address_at(Address pc) {
Instr instr = Memory::int32_at(pc);
// Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
ASSERT((instr & 0x0f7f0000) == 0x051f0000);
int offset = instr & 0xfff; // offset_12 is unsigned
if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
// Verify that the constant pool comes after the instruction referencing it.
ASSERT(offset >= -4);
return pc + offset + 8;
}
Address Assembler::target_address_at(Address pc) {
return Memory::Address_at(target_address_address_at(pc));
}
void Assembler::set_target_at(Address constant_pool_entry,
Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
void Assembler::set_target_address_at(Address pc, Address target) {
Memory::Address_at(target_address_address_at(pc)) = target;
// Intuitively, we would think it is necessary to flush the instruction cache
// after patching a target address in the code as follows:
// CPU::FlushICache(pc, sizeof(target));
// However, on ARM, no instruction was actually patched by the assignment
// above; the target address is not part of an instruction, it is patched in
// the constant pool and is read via a data access; the instruction accessing
// this address in the constant pool remains unchanged.
}
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_THUMB2_INL_H_

1821
deps/v8/src/arm/assembler-thumb2.cc

File diff suppressed because it is too large

1027
deps/v8/src/arm/assembler-thumb2.h

File diff suppressed because it is too large

92
deps/v8/src/arm/codegen-arm.cc

@ -1769,7 +1769,9 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
primitive.Bind();
frame_->EmitPush(r0);
frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
Result arg_count(r0);
__ mov(r0, Operand(0));
frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
jsobject.Bind();
// Get the set of properties (as a FixedArray or Map).
@ -1908,7 +1910,9 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ ldr(r0, frame_->ElementAt(4)); // push enumerable
frame_->EmitPush(r0);
frame_->EmitPush(r3); // push entry
frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
Result arg_count_reg(r0);
__ mov(r0, Operand(1));
frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2);
__ mov(r3, Operand(r0));
// If the property has been removed while iterating, we just skip it.
@ -3656,7 +3660,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
if (property != NULL) {
LoadAndSpill(property->obj());
LoadAndSpill(property->key());
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Result arg_count(r0);
__ mov(r0, Operand(1)); // not counting receiver
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else if (variable != NULL) {
Slot* slot = variable->slot();
@ -3664,7 +3670,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
LoadGlobal();
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Result arg_count(r0);
__ mov(r0, Operand(1)); // not counting receiver
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// lookup the context holding the named variable
@ -3676,7 +3684,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0);
__ mov(r0, Operand(variable->name()));
frame_->EmitPush(r0);
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
Result arg_count(r0);
__ mov(r0, Operand(1)); // not counting receiver
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
} else {
// Default: Result of deleting non-global, not dynamically
@ -3726,7 +3736,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
smi_label.Branch(eq);
frame_->EmitPush(r0);
frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);
Result arg_count(r0);
__ mov(r0, Operand(0)); // not counting receiver
frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
continue_label.Jump();
smi_label.Bind();
@ -3748,7 +3760,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
__ tst(r0, Operand(kSmiTagMask));
continue_label.Branch(eq);
frame_->EmitPush(r0);
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
Result arg_count(r0);
__ mov(r0, Operand(0)); // not counting receiver
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
continue_label.Bind();
break;
}
@ -3833,7 +3847,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
{
// Convert the operand to a number.
frame_->EmitPush(r0);
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
Result arg_count(r0);
__ mov(r0, Operand(0));
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
}
if (is_postfix) {
// Postfix: store to result (on the stack).
@ -4219,7 +4235,9 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
case Token::IN: {
LoadAndSpill(left);
LoadAndSpill(right);
frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
Result arg_count(r0);
__ mov(r0, Operand(1)); // not counting receiver
frame_->InvokeBuiltin(Builtins::IN, CALL_JS, &arg_count, 2);
frame_->EmitPush(r0);
break;
}
@ -5061,10 +5079,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions to implement double precision comparison.
__ vmov(d6, r0, r1);
__ vmov(d7, r2, r3);
__ fmdrr(d6, r0, r1);
__ fmdrr(d7, r2, r3);
__ vcmp(d6, d7);
__ fcmp(d6, d7);
__ vmrs(pc);
__ mov(r0, Operand(0), LeaveCC, eq);
__ mov(r0, Operand(1), LeaveCC, lt);
@ -5127,6 +5145,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ mov(r0, Operand(arg_count));
__ InvokeBuiltin(native, CALL_JS);
__ cmp(r0, Operand(0));
__ pop(pc);
@ -5225,6 +5244,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// Only first argument is a string.
__ bind(&string1);
__ mov(r0, Operand(2)); // Set number of arguments.
__ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
// First argument was not a string, test second.
@ -5236,11 +5256,13 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// Only second argument is a string.
__ b(&not_strings);
__ mov(r0, Operand(2)); // Set number of arguments.
__ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
__ bind(&not_strings);
}
__ mov(r0, Operand(1)); // Set number of arguments.
__ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return.
// We branch here if at least one of r0 and r1 is not a Smi.
@ -5331,22 +5353,22 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions to implement
// double precision, add, subtract, multiply, divide.
__ vmov(d6, r0, r1);
__ vmov(d7, r2, r3);
__ fmdrr(d6, r0, r1);
__ fmdrr(d7, r2, r3);
if (Token::MUL == operation) {
__ vmul(d5, d6, d7);
__ fmuld(d5, d6, d7);
} else if (Token::DIV == operation) {
__ vdiv(d5, d6, d7);
__ fdivd(d5, d6, d7);
} else if (Token::ADD == operation) {
__ vadd(d5, d6, d7);
__ faddd(d5, d6, d7);
} else if (Token::SUB == operation) {
__ vsub(d5, d6, d7);
__ fsubd(d5, d6, d7);
} else {
UNREACHABLE();
}
__ vmov(r0, r1, d5);
__ fmrrd(r0, r1, d5);
__ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
__ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
@ -5435,9 +5457,9 @@ static void GetInt32(MacroAssembler* masm,
// ARMv7 VFP3 instructions implementing double precision to integer
// conversion using round to zero.
__ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
__ vmov(d7, scratch2, scratch);
__ vcvt(s15, d7);
__ vmov(dest, s15);
__ fmdrr(d7, scratch2, scratch);
__ ftosid(s15, d7);
__ fmrs(dest, s15);
} else {
// Get the top bits of the mantissa.
__ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
@ -5576,6 +5598,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
__ bind(&slow);
__ push(r1); // restore stack
__ push(r0);
__ mov(r0, Operand(1)); // 1 argument (not counting receiver).
switch (op_) {
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
@ -5680,29 +5703,6 @@ static void MultiplyByKnownInt2(
}
const char* GenericBinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int len = 100;
name_ = Bootstrapper::AllocateAutoDeletedArray(len);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
case NO_OVERWRITE: overwrite_name = "Alloc"; break;
case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
OS::SNPrintF(Vector<char>(name_, len),
"GenericBinaryOpStub_%s_%s%s",
op_name,
overwrite_name,
specialized_on_rhs_ ? "_ConstantRhs" : 0);
return name_;
}
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// r1 : x
// r0 : y
@ -5980,6 +5980,7 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
// Enter runtime system.
__ bind(&slow);
__ push(r0);
__ mov(r0, Operand(0)); // Set number of arguments.
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
__ bind(&not_smi);
@ -6455,6 +6456,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Slow-case. Tail call builtin.
__ bind(&slow);
__ mov(r0, Operand(1)); // Arg count without receiver.
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
}

21
deps/v8/src/arm/codegen-arm.h

@ -455,15 +455,13 @@ class GenericBinaryOpStub : public CodeStub {
: op_(op),
mode_(mode),
constant_rhs_(constant_rhs),
specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
name_(NULL) { }
specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
private:
Token::Value op_;
OverwriteMode mode_;
int constant_rhs_;
bool specialized_on_rhs_;
char* name_;
static const int kMaxKnownRhs = 0x40000000;
@ -508,7 +506,22 @@ class GenericBinaryOpStub : public CodeStub {
return key;
}
const char* GetName();
const char* GetName() {
switch (op_) {
case Token::ADD: return "GenericBinaryOpStub_ADD";
case Token::SUB: return "GenericBinaryOpStub_SUB";
case Token::MUL: return "GenericBinaryOpStub_MUL";
case Token::DIV: return "GenericBinaryOpStub_DIV";
case Token::MOD: return "GenericBinaryOpStub_MOD";
case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
case Token::SAR: return "GenericBinaryOpStub_SAR";
case Token::SHL: return "GenericBinaryOpStub_SHL";
case Token::SHR: return "GenericBinaryOpStub_SHR";
default: return "GenericBinaryOpStub";
}
}
#ifdef DEBUG
void Print() {

21
deps/v8/src/arm/disasm-arm.cc

@ -897,14 +897,15 @@ void Decoder::DecodeUnconditional(Instr* instr) {
// void Decoder::DecodeTypeVFP(Instr* instr)
// vmov: Sn = Rt
// vmov: Rt = Sn
// vcvt: Dd = Sm
// vcvt: Sd = Dm
// Dd = vadd(Dn, Dm)
// Dd = vsub(Dn, Dm)
// Dd = vmul(Dn, Dm)
// Dd = vdiv(Dn, Dm)
// Implements the following VFP instructions:
// fmsr: Sn = Rt
// fmrs: Rt = Sn
// fsitod: Dd = Sm
// ftosid: Sd = Dm
// Dd = faddd(Dn, Dm)
// Dd = fsubd(Dn, Dm)
// Dd = fmuld(Dn, Dm)
// Dd = fdivd(Dn, Dm)
// vcmp(Dd, Dm)
// VMRS
void Decoder::DecodeTypeVFP(Instr* instr) {
@ -996,8 +997,8 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
// Decode Type 6 coprocessor instructions.
// Dm = vmov(Rt, Rt2)
// <Rt, Rt2> = vmov(Dm)
// Dm = fmdrr(Rt, Rt2)
// <Rt, Rt2> = fmrrd(Dm)
void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));

350
deps/v8/src/arm/fast-codegen-arm.cc

@ -414,98 +414,78 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
Slot* slot = var->slot();
Property* prop = var->AsProperty();
if (slot != NULL) {
switch (slot->type()) {
case Slot::PARAMETER: // Fall through.
case Slot::LOCAL:
if (decl->mode() == Variable::CONST) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, MemOperand(fp, SlotOffset(var->slot())));
} else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(ip);
__ str(ip, MemOperand(fp, SlotOffset(var->slot())));
}
break;
case Slot::CONTEXT:
// The variable in the decl always resides in the current context.
ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
ASSERT(slot != NULL); // No global declarations here.
// We have 3 cases for slots: LOOKUP, LOCAL, CONTEXT.
switch (slot->type()) {
case Slot::LOOKUP: {
__ mov(r2, Operand(var->name()));
// Declaration nodes are always introduced in one of two modes.
ASSERT(decl->mode() == Variable::VAR || decl->mode() == Variable::CONST);
PropertyAttributes attr = decl->mode() == Variable::VAR ?
NONE : READ_ONLY;
__ mov(r1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (decl->mode() == Variable::CONST) {
__ mov(r0, Operand(Factory::the_hole_value()));
__ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
} else if (decl->fun() != NULL) {
__ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
Visit(decl->fun()); // Initial value for function decl.
} else {
__ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
__ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
}
__ CallRuntime(Runtime::kDeclareContextSlot, 4);
break;
}
case Slot::LOCAL:
if (decl->mode() == Variable::CONST) {
__ mov(r0, Operand(Factory::the_hole_value()));
__ str(r0, MemOperand(fp, SlotOffset(var->slot())));
} else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(r0);
__ str(r0, MemOperand(fp, SlotOffset(var->slot())));
}
break;
case Slot::CONTEXT:
// The variable in the decl always resides in the current context.
ASSERT(function_->scope()->ContextChainLength(slot->var()->scope()) == 0);
if (decl->mode() == Variable::CONST) {
__ mov(r0, Operand(Factory::the_hole_value()));
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
__ ldr(r1,
CodeGenerator::ContextOperand(cp, Context::FCONTEXT_INDEX));
__ ldr(r1, CodeGenerator::ContextOperand(cp,
Context::FCONTEXT_INDEX));
__ cmp(r1, cp);
__ Check(eq, "Unexpected declaration in current context.");
}
if (decl->mode() == Variable::CONST) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
// No write barrier since the_hole_value is in old space.
} else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(r0);
__ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
int offset = Context::SlotOffset(slot->index());
__ mov(r2, Operand(offset));
// We know that we have written a function, which is not a smi.
__ RecordWrite(cp, r2, r0);
}
break;
case Slot::LOOKUP: {
__ mov(r2, Operand(var->name()));
// Declaration nodes are always introduced in one of two modes.
ASSERT(decl->mode() == Variable::VAR ||
decl->mode() == Variable::CONST);
PropertyAttributes attr =
(decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
__ mov(r1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (decl->mode() == Variable::CONST) {
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
} else if (decl->fun() != NULL) {
__ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
Visit(decl->fun()); // Initial value for function decl.
} else {
__ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
__ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
}
__ CallRuntime(Runtime::kDeclareContextSlot, 4);
break;
}
}
} else if (prop != NULL) {
if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
ASSERT_EQ(Expression::kValue, prop->obj()->context());
Visit(prop->obj());
ASSERT_EQ(Expression::kValue, prop->key()->context());
Visit(prop->key());
if (decl->fun() != NULL) {
ASSERT_EQ(Expression::kValue, decl->fun()->context());
__ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
// No write barrier since the_hole_value is in old space.
ASSERT(!Heap::InNewSpace(*Factory::the_hole_value()));
} else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(r0);
} else {
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
__ ldr(r1, CodeGenerator::ContextOperand(cp,
Context::FCONTEXT_INDEX));
__ cmp(r1, cp);
__ Check(eq, "Unexpected declaration in current context.");
}
__ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
int offset = Context::SlotOffset(slot->index());
__ mov(r2, Operand(offset));
// We know that we have written a function, which is not a smi.
__ RecordWrite(cp, r2, r0);
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Value in r0 is ignored (declarations are statements). Receiver
// and key on stack are discarded.
__ add(sp, sp, Operand(2 * kPointerSize));
}
break;
default:
UNREACHABLE();
}
}
@ -521,6 +501,21 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
Expression* expr = stmt->expression();
// Complete the statement based on the type of the subexpression.
if (expr->AsLiteral() != NULL) {
__ mov(r0, Operand(expr->AsLiteral()->handle()));
} else {
ASSERT_EQ(Expression::kValue, expr->context());
Visit(expr);
__ pop(r0);
}
EmitReturnSequence(stmt->statement_pos());
}
void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
@ -541,24 +536,18 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr->var(), expr->context());
}
void FastCodeGenerator::EmitVariableLoad(Variable* var,
Expression::Context context) {
Expression* rewrite = var->rewrite();
Expression* rewrite = expr->var()->rewrite();
if (rewrite == NULL) {
ASSERT(var->is_global());
ASSERT(expr->var()->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object on the stack.
__ ldr(ip, CodeGenerator::GlobalObject());
__ push(ip);
__ mov(r2, Operand(var->name()));
__ mov(r2, Operand(expr->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
DropAndMove(context, r0);
DropAndMove(expr->context(), r0);
} else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
if (FLAG_debug_code) {
@ -579,7 +568,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
UNREACHABLE();
}
}
Move(context, slot, r0);
Move(expr->context(), slot, r0);
} else {
// A variable has been rewritten into an explicit access to
// an object property.
@ -614,7 +603,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
__ Call(ic, RelocInfo::CODE_TARGET);
// Drop key and object left on the stack by IC, and push the result.
DropAndMove(context, r0, 2);
DropAndMove(expr->context(), r0, 2);
}
}
@ -648,15 +637,32 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Label boilerplate_exists;
__ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// r2 = literal array (0).
__ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
__ ldr(r0, FieldMemOperand(r2, literal_offset));
// Check whether we need to materialize the object literal boilerplate.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, Operand(ip));
__ b(ne, &boilerplate_exists);
// Create boilerplate if it does not exist.
// r1 = literal index (1).
__ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
// r0 = constant properties (2).
__ mov(r0, Operand(expr->constant_properties()));
__ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
__ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
__ bind(&boilerplate_exists);
// r0 contains boilerplate.
// Clone boilerplate.
__ push(r0);
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 3);
__ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
} else {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
__ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
}
// If result_saved == true: The result is saved on top of the
@ -757,15 +763,32 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
Label make_clone;
// Fetch the function's literals array.
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
// Check if the literal's boilerplate has been instantiated.
int offset =
FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
__ ldr(r0, FieldMemOperand(r3, offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
__ b(&make_clone, ne);
// Instantiate the boilerplate.
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(expr->literals()));
__ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
__ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
__ bind(&make_clone);
// Clone the boilerplate.
__ push(r0);
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
__ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
} else {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
__ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
}
bool result_saved = false; // Is the result saved to the stack?
@@ -837,38 +860,10 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
Expression::Context context) {
Literal* key = prop->key()->AsLiteral();
__ mov(r2, Operand(key->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
Move(context, r0);
}
void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
Move(context, r0);
}
void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
Expression::Context context) {
__ pop(r0);
__ pop(r1);
GenericBinaryOpStub stub(op,
NO_OVERWRITE);
__ CallStub(&stub);
Move(context, r0);
}
void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
if (var->is_global()) {
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in r0, variable name in
@@ -981,6 +976,35 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
UNREACHABLE();
break;
}
} else {
Property* property = var->rewrite()->AsProperty();
ASSERT_NOT_NULL(property);
// Load object and key onto the stack.
Slot* object_slot = property->obj()->AsSlot();
ASSERT_NOT_NULL(object_slot);
Move(Expression::kValue, object_slot, r0);
Literal* key_literal = property->key()->AsLiteral();
ASSERT_NOT_NULL(key_literal);
Move(Expression::kValue, key_literal);
// Value to store was pushed before object and key on the stack.
__ ldr(r0, MemOperand(sp, 2 * kPointerSize));
// Arguments to the IC are the value in r0 and the object and key on the stack.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
if (expr->context() == Expression::kEffect) {
__ add(sp, sp, Operand(3 * kPointerSize));
} else if (expr->context() == Expression::kValue) {
// Value is still on the stack at sp[2 * kPointerSize]
__ add(sp, sp, Operand(2 * kPointerSize));
} else {
__ ldr(r0, MemOperand(sp, 2 * kPointerSize));
DropAndMove(expr->context(), r0, 3);
}
}
}
@@ -1080,9 +1104,7 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
DropAndMove(expr->context(), r0);
}
void FastCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> ignored,
RelocInfo::Mode mode) {
void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1095,7 +1117,7 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
__ Call(ic, mode);
__ Call(ic, reloc_info);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -1135,7 +1157,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
// Push global object as receiver for the call IC lookup.
__ ldr(r0, CodeGenerator::GlobalObject());
__ stm(db_w, sp, r1.bit() | r0.bit());
EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// Call to a lookup slot.
@@ -1149,7 +1171,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
__ mov(r0, Operand(key->handle()));
__ push(r0);
Visit(prop->obj());
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
@@ -1684,63 +1706,7 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
Move(expr->context(), r0);
}
Register FastCodeGenerator::result_register() { return r0; }
Register FastCodeGenerator::context_register() { return cp; }
void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));
}
void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
__ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
}
// ----------------------------------------------------------------------------
// Non-local control flow support.
void FastCodeGenerator::EnterFinallyBlock() {
ASSERT(!result_register().is(r1));
// Store result register while executing finally block.
__ push(result_register());
// Cook return address in link register to stack (smi encoded Code* delta)
__ sub(r1, lr, Operand(masm_->CodeObject()));
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
ASSERT_EQ(0, kSmiTag);
__ add(r1, r1, Operand(r1)); // Convert to smi.
__ push(r1);
}
void FastCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(r1));
// Restore result register from stack.
__ pop(r1);
// Uncook return address and return.
__ pop(result_register());
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
__ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
__ add(pc, r1, Operand(masm_->CodeObject()));
}
void FastCodeGenerator::ThrowException() {
__ push(result_register());
__ CallRuntime(Runtime::kThrow, 1);
}
#undef __
} } // namespace v8::internal

4
deps/v8/src/arm/frames-arm.cc

@@ -28,11 +28,7 @@
#include "v8.h"
#include "frames-inl.h"
#ifdef V8_ARM_VARIANT_THUMB
#include "arm/assembler-thumb2-inl.h"
#else
#include "arm/assembler-arm-inl.h"
#endif
namespace v8 {

10
deps/v8/src/arm/ic-arm.cc

@@ -276,7 +276,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm, argc);
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
@@ -371,11 +371,13 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm, argc);
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
void CallIC::Generate(MacroAssembler* masm,
int argc,
const ExternalReference& f) {
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
@@ -392,7 +394,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Call the entry.
__ mov(r0, Operand(2));
__ mov(r1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
__ mov(r1, Operand(f));
CEntryStub stub(1);
__ CallStub(&stub);

30
deps/v8/src/arm/macro-assembler-arm.cc

@@ -162,21 +162,6 @@ void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
}
void MacroAssembler::Drop(int stack_elements, Condition cond) {
if (stack_elements > 0) {
add(sp, sp, Operand(stack_elements * kPointerSize), LeaveCC, cond);
}
}
void MacroAssembler::Call(Label* target) {
bl(target);
}
void MacroAssembler::Move(Register dst, Handle<Object> value) {
mov(dst, Operand(value));
}
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
@@ -643,15 +628,6 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
}
void MacroAssembler::PopTryHandler() {
ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
pop(r1);
mov(ip, Operand(ExternalReference(Top::k_handler_address)));
add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
str(r1, MemOperand(ip));
}
Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
Register scratch,
@@ -1018,9 +994,9 @@ void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
Register outLowReg) {
// ARMv7 VFP3 instructions to implement integer to double conversion.
mov(r7, Operand(inReg, ASR, kSmiTagSize));
vmov(s15, r7);
vcvt(d7, s15);
vmov(outLowReg, outHighReg, d7);
fmsr(s15, r7);
fsitod(d7, s15);
fmrrd(outLowReg, outHighReg, d7);
}

6
deps/v8/src/arm/macro-assembler-arm.h

@@ -64,9 +64,6 @@ class MacroAssembler: public Assembler {
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Ret(Condition cond = al);
void Drop(int stack_elements, Condition cond = al);
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
// Jumps to the label at the index given by the Smi in "index".
void SmiJumpTable(Register index, Vector<Label*> targets);
// Load an object from the root table.
@@ -151,9 +148,6 @@ class MacroAssembler: public Assembler {
// On exit, r0 contains TOS (code slot).
void PushTryHandler(CodeLocation try_location, HandlerType type);
// Unlink the stack handler on top of the stack from the try handler chain.
// Must preserve the result register.
void PopTryHandler();
// ---------------------------------------------------------------------------
// Inline caching support

20
deps/v8/src/arm/simulator-arm.cc

@@ -1893,14 +1893,14 @@ void Simulator::DecodeUnconditional(Instr* instr) {
// void Simulator::DecodeTypeVFP(Instr* instr)
// The following ARMv7 VFP instructions are currently supported.
// vmov :Sn = Rt
// vmov :Rt = Sn
// vcvt: Dd = Sm
// vcvt: Sd = Dm
// Dd = vadd(Dn, Dm)
// Dd = vsub(Dn, Dm)
// Dd = vmul(Dn, Dm)
// Dd = vdiv(Dn, Dm)
// fmsr :Sn = Rt
// fmrs :Rt = Sn
// fsitod: Dd = Sm
// ftosid: Sd = Dm
// Dd = faddd(Dn, Dm)
// Dd = fsubd(Dn, Dm)
// Dd = fmuld(Dn, Dm)
// Dd = fdivd(Dn, Dm)
// vcmp(Dd, Dm)
// VMRS
void Simulator::DecodeTypeVFP(Instr* instr) {
@@ -2020,8 +2020,8 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
// void Simulator::DecodeType6CoprocessorIns(Instr* instr)
// Decode Type 6 coprocessor instructions.
// Dm = vmov(Rt, Rt2)
// <Rt, Rt2> = vmov(Dm)
// Dm = fmdrr(Rt, Rt2)
// <Rt, Rt2> = fmrrd(Dm)
void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));

47
deps/v8/src/arm/stub-cache-arm.cc

@@ -446,7 +446,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
}
bool StubCompiler::GenerateLoadCallback(JSObject* object,
void StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@@ -454,8 +454,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
Register scratch2,
AccessorInfo* callback,
String* name,
Label* miss,
Failure** failure) {
Label* miss) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss);
@@ -477,8 +476,6 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(load_callback_property, 5, 1);
return true;
}
@@ -777,26 +774,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
if (Heap::InNewSpace(function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss);
__ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
__ b(ne, &miss);
// Check the shared function info. Make sure it hasn't changed.
__ mov(r3, Operand(Handle<SharedFunctionInfo>(function->shared())));
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ cmp(r2, r3);
__ b(ne, &miss);
} else {
__ cmp(r1, Operand(Handle<JSFunction>(function)));
__ b(ne, &miss);
}
__ cmp(r1, Operand(Handle<JSFunction>(function)));
__ b(ne, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -1024,10 +1003,10 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
}
Object* LoadStubCompiler::CompileLoadCallback(String* name,
JSObject* object,
Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
JSObject* holder,
AccessorInfo* callback) {
AccessorInfo* callback,
String* name) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1036,11 +1015,7 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
Label miss;
__ ldr(r0, MemOperand(sp, 0));
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1,
callback, name, &miss, &failure);
if (!success) return failure;
GenerateLoadCallback(object, holder, r0, r2, r3, r1, callback, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1193,11 +1168,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ cmp(r2, Operand(Handle<String>(name)));
__ b(ne, &miss);
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(receiver, holder, r0, r2, r3, r1,
callback, name, &miss, &failure);
if (!success) return failure;
GenerateLoadCallback(receiver, holder, r0, r2, r3, r1, callback, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

3
deps/v8/src/arm/virtual-frame-arm.cc

@@ -243,8 +243,11 @@ void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
Result* arg_count_register,
int arg_count) {
ASSERT(arg_count_register->reg().is(r0));
PrepareForCall(arg_count, arg_count);
arg_count_register->Unuse();
__ InvokeBuiltin(id, flags);
}

1
deps/v8/src/arm/virtual-frame-arm.h

@@ -305,6 +305,7 @@ class VirtualFrame : public ZoneObject {
// removes from) the stack.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flag,
Result* arg_count_register,
int arg_count);
// Call into an IC stub given the number of arguments it removes

10
deps/v8/src/assembler.cc

@@ -573,16 +573,6 @@ ExternalReference ExternalReference::random_positive_smi_function() {
}
ExternalReference ExternalReference::keyed_lookup_cache_keys() {
return ExternalReference(KeyedLookupCache::keys_address());
}
ExternalReference ExternalReference::keyed_lookup_cache_field_offsets() {
return ExternalReference(KeyedLookupCache::field_offsets_address());
}
ExternalReference ExternalReference::the_hole_value_location() {
return ExternalReference(Factory::the_hole_value().location());
}

4
deps/v8/src/assembler.h

@@ -401,10 +401,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference builtin_passed_function();
static ExternalReference random_positive_smi_function();
// Static data in the keyed lookup cache.
static ExternalReference keyed_lookup_cache_keys();
static ExternalReference keyed_lookup_cache_field_offsets();
// Static variable Factory::the_hole_value.location()
static ExternalReference the_hole_value_location();

17
deps/v8/src/ast.h

@@ -139,7 +139,6 @@ class AstNode: public ZoneObject {
virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
virtual CompareOperation* AsCompareOperation() { return NULL; }
};
@@ -193,13 +192,13 @@ class Expression: public AstNode {
virtual void MarkAsStatement() { /* do nothing */ }
// Static type information for this expression.
StaticType* type() { return &type_; }
SmiAnalysis* type() { return &type_; }
Context context() { return context_; }
void set_context(Context context) { context_ = context; }
private:
StaticType type_;
SmiAnalysis type_;
Context context_;
};
@@ -1186,7 +1185,7 @@ class CountOperation: public Expression {
class CompareOperation: public Expression {
public:
CompareOperation(Token::Value op, Expression* left, Expression* right)
: op_(op), left_(left), right_(right), is_for_loop_condition_(false) {
: op_(op), left_(left), right_(right) {
ASSERT(Token::IsCompareOp(op));
}
@@ -1196,18 +1195,10 @@ class CompareOperation: public Expression {
Expression* left() const { return left_; }
Expression* right() const { return right_; }
// Accessors for the flag indicating whether this compare operation is the condition of a for loop.
bool is_for_loop_condition() const { return is_for_loop_condition_; }
void set_is_for_loop_condition() { is_for_loop_condition_ = true; }
// Type testing & conversion
virtual CompareOperation* AsCompareOperation() { return this; }
private:
Token::Value op_;
Expression* left_;
Expression* right_;
bool is_for_loop_condition_;
};
@@ -1250,8 +1241,6 @@ class Assignment: public Expression {
Expression* target() const { return target_; }
Expression* value() const { return value_; }
int position() { return pos_; }
// This check relies on the definition order of token in token.h.
bool is_compound() const { return op() > Token::ASSIGN; }
// An initialization block is a series of statements of the form
// x.y.z.a = ...; x.y.z.b = ...; etc. The parser marks the beginning and

26
deps/v8/src/bootstrapper.cc

@@ -95,8 +95,6 @@ static SourceCodeCache natives_cache(Script::TYPE_NATIVE);
static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
// This is for delete, not delete[].
static List<char*>* delete_these_non_arrays_on_tear_down = NULL;
// This is for delete[]
static List<char*>* delete_these_arrays_on_tear_down = NULL;
NativesExternalStringResource::NativesExternalStringResource(const char* source)
@@ -152,41 +150,17 @@ void Bootstrapper::Initialize(bool create_heap_objects) {
}
char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
char* memory = new char[bytes];
if (memory != NULL) {
if (delete_these_arrays_on_tear_down == NULL) {
delete_these_arrays_on_tear_down = new List<char*>(2);
}
delete_these_arrays_on_tear_down->Add(memory);
}
return memory;
}
void Bootstrapper::TearDown() {
if (delete_these_non_arrays_on_tear_down != NULL) {
int len = delete_these_non_arrays_on_tear_down->length();
ASSERT(len < 20); // Don't use this mechanism for unbounded allocations.
for (int i = 0; i < len; i++) {
delete delete_these_non_arrays_on_tear_down->at(i);
delete_these_non_arrays_on_tear_down->at(i) = NULL;
}
delete delete_these_non_arrays_on_tear_down;
delete_these_non_arrays_on_tear_down = NULL;
}
if (delete_these_arrays_on_tear_down != NULL) {
int len = delete_these_arrays_on_tear_down->length();
ASSERT(len < 1000); // Don't use this mechanism for unbounded allocations.
for (int i = 0; i < len; i++) {
delete[] delete_these_arrays_on_tear_down->at(i);
delete_these_arrays_on_tear_down->at(i) = NULL;
}
delete delete_these_arrays_on_tear_down;
delete_these_arrays_on_tear_down = NULL;
}
natives_cache.Initialize(false); // Yes, symmetrical
extensions_cache.Initialize(false);
}

4
deps/v8/src/bootstrapper.h

@@ -74,10 +74,6 @@ class Bootstrapper : public AllStatic {
static char* ArchiveState(char* to);
static char* RestoreState(char* from);
static void FreeThreadResources();
// This will allocate a char array that is deleted when V8 is shut down.
// It should only be used for strictly finite allocations.
static char* AllocateAutoDeletedArray(int bytes);
};

143
deps/v8/src/code-stubs.cc

@@ -35,117 +35,82 @@
namespace v8 {
namespace internal {
bool CodeStub::FindCodeInCache(Code** code_out) {
if (has_custom_cache()) return GetCustomCache(code_out);
int index = Heap::code_stubs()->FindEntry(GetKey());
if (index != NumberDictionary::kNotFound) {
*code_out = Code::cast(Heap::code_stubs()->ValueAt(index));
return true;
}
return false;
}
void CodeStub::GenerateCode(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
Counters::code_stubs.Increment();
// Nested stubs are not allowed for leafs.
masm->set_allow_stub_calls(AllowsStubCalls());
// Generate the code for the stub.
masm->set_generating_stub(true);
Generate(masm);
}
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->set_major_key(MajorKey());
// Add unresolved entries in the code to the fixup list.
Bootstrapper::AddFixup(code, masm);
LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
Counters::total_stubs_code_size.Increment(code->instruction_size());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
#ifdef DEBUG
Print();
#endif
code->Disassemble(GetName());
PrintF("\n");
Handle<Code> CodeStub::GetCode() {
bool custom_cache = has_custom_cache();
int index = 0;
uint32_t key = 0;
if (custom_cache) {
Code* cached;
if (GetCustomCache(&cached)) {
return Handle<Code>(cached);
} else {
index = NumberDictionary::kNotFound;
}
} else {
key = GetKey();
index = Heap::code_stubs()->FindEntry(key);
if (index != NumberDictionary::kNotFound)
return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
}
#endif
}
Handle<Code> CodeStub::GetCode() {
Code* code;
if (!FindCodeInCache(&code)) {
Code* result;
{
v8::HandleScope scope;
// Update the static counter each time a new code stub is generated.
Counters::code_stubs.Increment();
// Generate the new code.
MacroAssembler masm(NULL, 256);
GenerateCode(&masm);
// Nested stubs are not allowed for leafs.
masm.set_allow_stub_calls(AllowsStubCalls());
// Generate the code for the stub.
masm.set_generating_stub(true);
Generate(&masm);
// Create the code object.
CodeDesc desc;
masm.GetCode(&desc);
// Copy the generated code into a heap object.
// Copy the generated code into a heap object, and store the major key.
Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
Handle<Code> new_object =
Factory::NewCode(desc, NULL, flags, masm.CodeObject());
RecordCodeGeneration(*new_object, &masm);
Handle<Code> code = Factory::NewCode(desc, NULL, flags, masm.CodeObject());
code->set_major_key(MajorKey());
if (has_custom_cache()) {
SetCustomCache(*new_object);
// Add unresolved entries in the code to the fixup list.
Bootstrapper::AddFixup(*code, &masm);
LOG(CodeCreateEvent(Logger::STUB_TAG, *code, GetName()));
Counters::total_stubs_code_size.Increment(code->instruction_size());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
#ifdef DEBUG
Print();
#endif
code->Disassemble(GetName());
PrintF("\n");
}
#endif
if (custom_cache) {
SetCustomCache(*code);
} else {
// Update the dictionary and the root in Heap.
Handle<NumberDictionary> dict =
Factory::DictionaryAtNumberPut(
Handle<NumberDictionary>(Heap::code_stubs()),
GetKey(),
new_object);
key,
code);
Heap::public_set_code_stubs(*dict);
}
code = *new_object;
}
return Handle<Code>(code);
}
Object* CodeStub::TryGetCode() {
Code* code;
if (!FindCodeInCache(&code)) {
// Generate the new code.
MacroAssembler masm(NULL, 256);
GenerateCode(&masm);
// Create the code object.
CodeDesc desc;
masm.GetCode(&desc);
// Try to copy the generated code into a heap object.
Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
Object* new_object =
Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
if (new_object->IsFailure()) return new_object;
code = Code::cast(new_object);
RecordCodeGeneration(code, &masm);
if (has_custom_cache()) {
SetCustomCache(code);
} else {
// Try to update the code cache but do not fail if unable.
new_object = Heap::code_stubs()->AtNumberPut(GetKey(), code);
if (!new_object->IsFailure()) {
Heap::public_set_code_stubs(NumberDictionary::cast(new_object));
}
}
result = *code;
}
return code;
return Handle<Code>(result);
}

19
deps/v8/src/code-stubs.h

@@ -43,9 +43,6 @@ namespace internal {
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
V(StackCheck) \
V(FastNewClosure) \
V(FastNewContext) \
V(FastCloneShallowArray) \
V(UnarySub) \
V(RevertToNumber) \
V(ToBoolean) \
@@ -86,11 +83,6 @@ class CodeStub BASE_EMBEDDED {
// Retrieve the code for the stub. Generate the code if needed.
Handle<Code> GetCode();
// Retrieve the code for the stub if already generated. Do not
// generate the code if not already generated and instead return a
// retry after GC Failure object.
Object* TryGetCode();
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
};
@@ -112,20 +104,9 @@ class CodeStub BASE_EMBEDDED {
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
private:
// Lookup the code in the (possibly custom) cache.
bool FindCodeInCache(Code** code_out);
// Nonvirtual wrapper around the stub-specific Generate function. Call
// this function to set up the macro assembler and generate the code.
void GenerateCode(MacroAssembler* masm);
// Generates the assembler code for the stub.
virtual void Generate(MacroAssembler* masm) = 0;
// Perform bookkeeping required after code generation when stub code is
// initially generated.
void RecordCodeGeneration(Code* code, MacroAssembler* masm);
// Returns information for computing the number key.
virtual Major MajorKey() = 0;
virtual int MinorKey() = 0;

49
deps/v8/src/codegen.h

@@ -233,55 +233,6 @@ class StackCheckStub : public CodeStub {
};
class FastNewClosureStub : public CodeStub {
public:
void Generate(MacroAssembler* masm);
private:
const char* GetName() { return "FastNewClosureStub"; }
Major MajorKey() { return FastNewClosure; }
int MinorKey() { return 0; }
};
class FastNewContextStub : public CodeStub {
public:
static const int kMaximumSlots = 64;
explicit FastNewContextStub(int slots) : slots_(slots) {
ASSERT(slots_ > 0 && slots <= kMaximumSlots);
}
void Generate(MacroAssembler* masm);
private:
int slots_;
const char* GetName() { return "FastNewContextStub"; }
Major MajorKey() { return FastNewContext; }
int MinorKey() { return slots_; }
};
class FastCloneShallowArrayStub : public CodeStub {
public:
static const int kMaximumLength = 8;
explicit FastCloneShallowArrayStub(int length) : length_(length) {
ASSERT(length >= 0 && length <= kMaximumLength);
}
void Generate(MacroAssembler* masm);
private:
int length_;
const char* GetName() { return "FastCloneShallowArrayStub"; }
Major MajorKey() { return FastCloneShallowArray; }
int MinorKey() { return length_; }
};
class InstanceofStub: public CodeStub {
public:
InstanceofStub() { }

78
deps/v8/src/compiler.cc

@@ -56,8 +56,6 @@ class CodeGenSelector: public AstVisitor {
private:
// Visit an expression in a given expression context.
void ProcessExpression(Expression* expr, Expression::Context context) {
ASSERT(expr->context() == Expression::kUninitialized ||
expr->context() == context);
Expression::Context saved = context_;
context_ = context;
Visit(expr);
@@ -598,7 +596,7 @@ CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
Slot* slot = scope->parameter(i)->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
if (FLAG_trace_bailout) {
PrintF("Function has context-allocated parameters.\n");
PrintF("function has context-allocated parameters");
}
return NORMAL;
}
@@ -647,18 +645,6 @@ void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {
void CodeGenSelector::VisitDeclaration(Declaration* decl) {
Property* prop = decl->proxy()->AsProperty();
if (prop != NULL) {
// Property rewrites are shared, ensure we are not changing its
// expression context state.
ASSERT(prop->obj()->context() == Expression::kUninitialized ||
prop->obj()->context() == Expression::kValue);
ASSERT(prop->key()->context() == Expression::kUninitialized ||
prop->key()->context() == Expression::kValue);
ProcessExpression(prop->obj(), Expression::kValue);
ProcessExpression(prop->key(), Expression::kValue);
}
if (decl->fun() != NULL) {
ProcessExpression(decl->fun(), Expression::kValue);
}
@@ -690,10 +676,12 @@ void CodeGenSelector::VisitIfStatement(IfStatement* stmt) {
void CodeGenSelector::VisitContinueStatement(ContinueStatement* stmt) {
BAILOUT("ContinueStatement");
}
void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {
BAILOUT("BreakStatement");
}
@@ -703,12 +691,12 @@ void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {
void CodeGenSelector::VisitWithEnterStatement(WithEnterStatement* stmt) {
ProcessExpression(stmt->expression(), Expression::kValue);
BAILOUT("WithEnterStatement");
}
void CodeGenSelector::VisitWithExitStatement(WithExitStatement* stmt) {
// Supported.
BAILOUT("WithExitStatement");
}
@@ -736,7 +724,21 @@ void CodeGenSelector::VisitWhileStatement(WhileStatement* stmt) {
void CodeGenSelector::VisitForStatement(ForStatement* stmt) {
BAILOUT("ForStatement");
// We do not handle loops with break or continue statements in their
// body. We will bail out when we hit those statements in the body.
if (stmt->init() != NULL) {
Visit(stmt->init());
CHECK_BAILOUT;
}
if (stmt->cond() != NULL) {
ProcessExpression(stmt->cond(), Expression::kTest);
CHECK_BAILOUT;
}
Visit(stmt->body());
if (stmt->next() != NULL) {
CHECK_BAILOUT;
Visit(stmt->next());
}
}
@@ -751,9 +753,7 @@ void CodeGenSelector::VisitTryCatchStatement(TryCatchStatement* stmt) {
void CodeGenSelector::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
Visit(stmt->try_block());
CHECK_BAILOUT;
Visit(stmt->finally_block());
BAILOUT("TryFinallyStatement");
}
@@ -885,22 +885,34 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
// non-context (stack-allocated) locals, and global variables.
Token::Value op = expr->op();
if (op == Token::INIT_CONST) BAILOUT("initialize constant");
if (op != Token::ASSIGN && op != Token::INIT_VAR) {
BAILOUT("compound assignment");
}
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
ASSERT(var == NULL || prop == NULL);
if (var != NULL) {
// All global variables are supported.
if (!var->is_global()) {
ASSERT(var->slot() != NULL);
Slot::Type type = var->slot()->type();
if (type == Slot::LOOKUP) {
BAILOUT("Lookup slot");
if (var->slot() == NULL) {
Property* property = var->AsProperty();
if (property == NULL) {
BAILOUT("non-global/non-slot/non-property assignment");
}
if (property->obj()->AsSlot() == NULL) {
BAILOUT("variable rewritten to property non slot object assignment");
}
if (property->key()->AsLiteral() == NULL) {
BAILOUT("variable rewritten to property non literal key assignment");
}
} else {
Slot::Type type = var->slot()->type();
if (type == Slot::LOOKUP) {
BAILOUT("Lookup slot");
}
}
}
} else if (prop != NULL) {
ASSERT(prop->obj()->context() == Expression::kUninitialized ||
prop->obj()->context() == Expression::kValue);
ProcessExpression(prop->obj(), Expression::kValue);
CHECK_BAILOUT;
// We will only visit the key during code generation for keyed property
@@ -911,8 +923,6 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
if (lit == NULL ||
!lit->handle()->IsSymbol() ||
String::cast(*(lit->handle()))->AsArrayIndex(&ignored)) {
ASSERT(prop->key()->context() == Expression::kUninitialized ||
prop->key()->context() == Expression::kValue);
ProcessExpression(prop->key(), Expression::kValue);
CHECK_BAILOUT;
}
@@ -1101,14 +1111,14 @@ void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
void CodeGenSelector::VisitCompareOperation(CompareOperation* expr) {
ProcessExpression(expr->left(), Expression::kValue);
CHECK_BAILOUT;
ProcessExpression(expr->right(), Expression::kValue);
ProcessExpression(expr->left(), Expression::kValue);
CHECK_BAILOUT;
ProcessExpression(expr->right(), Expression::kValue);
}
void CodeGenSelector::VisitThisFunction(ThisFunction* expr) {
// ThisFunction is supported.
BAILOUT("ThisFunction");
}
#undef BAILOUT

6
deps/v8/src/execution.cc

@@ -30,7 +30,6 @@
#include "v8.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "simulator.h"
@@ -608,11 +607,6 @@ Object* Execution::DebugBreakHelper() {
return Heap::undefined_value();
}
// Ignore debug break during bootstrapping.
if (Bootstrapper::IsActive()) {
return Heap::undefined_value();
}
{
JavaScriptFrameIterator it;
ASSERT(!it.done());

15
deps/v8/src/factory.cc

@@ -284,8 +284,7 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
Handle<JSFunction> Factory::BaseNewFunctionFromBoilerplate(
Handle<JSFunction> boilerplate,
Handle<Map> function_map,
PretenureFlag pretenure) {
Handle<Map> function_map) {
ASSERT(boilerplate->IsBoilerplate());
ASSERT(!boilerplate->has_initial_map());
ASSERT(!boilerplate->has_prototype());
@@ -293,22 +292,20 @@ Handle<JSFunction> Factory::BaseNewFunctionFromBoilerplate(
ASSERT(boilerplate->elements() == Heap::empty_fixed_array());
CALL_HEAP_FUNCTION(Heap::AllocateFunction(*function_map,
boilerplate->shared(),
Heap::the_hole_value(),
pretenure),
Heap::the_hole_value()),
JSFunction);
}
Handle<JSFunction> Factory::NewFunctionFromBoilerplate(
Handle<JSFunction> boilerplate,
Handle<Context> context,
PretenureFlag pretenure) {
Handle<JSFunction> result = BaseNewFunctionFromBoilerplate(
boilerplate, Top::function_map(), pretenure);
Handle<Context> context) {
Handle<JSFunction> result =
BaseNewFunctionFromBoilerplate(boilerplate, Top::function_map());
result->set_context(*context);
int number_of_literals = boilerplate->NumberOfLiterals();
Handle<FixedArray> literals =
Factory::NewFixedArray(number_of_literals, pretenure);
Factory::NewFixedArray(number_of_literals, TENURED);
if (number_of_literals > 0) {
// Store the object, regexp and array functions in the literals
// array prefix. These functions will be used when creating

6
deps/v8/src/factory.h

@@ -219,8 +219,7 @@ class Factory : public AllStatic {
static Handle<JSFunction> NewFunctionFromBoilerplate(
Handle<JSFunction> boilerplate,
Handle<Context> context,
PretenureFlag pretenure = TENURED);
Handle<Context> context);
static Handle<Code> NewCode(const CodeDesc& desc,
ZoneScopeInfo* sinfo,
@@ -375,8 +374,7 @@ class Factory : public AllStatic {
static Handle<JSFunction> BaseNewFunctionFromBoilerplate(
Handle<JSFunction> boilerplate,
Handle<Map> function_map,
PretenureFlag pretenure);
Handle<Map> function_map);
// Create a new map cache.
static Handle<MapCache> NewMapCache(int at_least_space_for);

308
deps/v8/src/fast-codegen.cc

@@ -36,7 +36,7 @@
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm())
#define __ ACCESS_MASM(masm_)
Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
Handle<Script> script,
@@ -232,10 +232,8 @@ void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
void FastCodeGenerator::VisitBlock(Block* stmt) {
Comment cmnt(masm_, "[ Block");
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
VisitStatements(stmt->statements());
__ bind(nested_statement.break_target());
}
@@ -280,88 +278,22 @@ void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
Comment cmnt(masm_, "[ ContinueStatement");
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
while (!current->IsContinueTarget(stmt->target())) {
stack_depth = current->Exit(stack_depth);
current = current->outer();
}
__ Drop(stack_depth);
Iteration* loop = current->AsIteration();
__ jmp(loop->continue_target());
UNREACHABLE();
}
void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
Comment cmnt(masm_, "[ BreakStatement");
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
while (!current->IsBreakTarget(stmt->target())) {
stack_depth = current->Exit(stack_depth);
current = current->outer();
}
__ Drop(stack_depth);
Breakable* target = current->AsBreakable();
__ jmp(target->break_target());
}
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
Expression* expr = stmt->expression();
// Complete the statement based on the type of the subexpression.
if (expr->AsLiteral() != NULL) {
__ Move(result_register(), expr->AsLiteral()->handle());
} else {
ASSERT_EQ(Expression::kValue, expr->context());
Visit(expr);
__ pop(result_register());
}
// Exit all nested statements.
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
while (current != NULL) {
stack_depth = current->Exit(stack_depth);
current = current->outer();
}
__ Drop(stack_depth);
EmitReturnSequence(stmt->statement_pos());
UNREACHABLE();
}
void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
Comment cmnt(masm_, "[ WithEnterStatement");
SetStatementPosition(stmt);
Visit(stmt->expression());
if (stmt->is_catch_block()) {
__ CallRuntime(Runtime::kPushCatchContext, 1);
} else {
__ CallRuntime(Runtime::kPushContext, 1);
}
// Both runtime calls return the new context in both the context and the
// result registers.
// Update local stack frame context field.
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
UNREACHABLE();
}
void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
Comment cmnt(masm_, "[ WithExitStatement");
SetStatementPosition(stmt);
// Pop context.
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
// Update local stack frame context field.
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
UNREACHABLE();
}
@@ -372,10 +304,8 @@ void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
Label body, stack_limit_hit, stack_check_success;
Iteration loop_statement(this, stmt);
increment_loop_depth();
Label body, exit, stack_limit_hit, stack_check_success;
__ bind(&body);
Visit(stmt->body());
@@ -386,11 +316,10 @@ void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
// We are not in an expression context because we have been compiling
// statements. Set up a test expression context for the condition.
__ bind(loop_statement.continue_target());
ASSERT_EQ(NULL, true_label_);
ASSERT_EQ(NULL, false_label_);
true_label_ = &body;
false_label_ = loop_statement.break_target();
false_label_ = &exit;
ASSERT(stmt->cond()->context() == Expression::kTest);
Visit(stmt->cond());
true_label_ = NULL;
@@ -401,7 +330,7 @@ void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
__ CallStub(&stack_stub);
__ jmp(&stack_check_success);
__ bind(loop_statement.break_target());
__ bind(&exit);
decrement_loop_depth();
}
@@ -409,18 +338,16 @@ void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
Comment cmnt(masm_, "[ WhileStatement");
Label body, stack_limit_hit, stack_check_success;
Iteration loop_statement(this, stmt);
increment_loop_depth();
Label test, body, exit, stack_limit_hit, stack_check_success;
// Emit the test at the bottom of the loop.
__ jmp(loop_statement.continue_target());
__ jmp(&test);
__ bind(&body);
Visit(stmt->body());
__ bind(loop_statement.continue_target());
__ bind(&test);
// Check stack before looping.
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
@@ -430,7 +357,7 @@ void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
ASSERT_EQ(NULL, true_label_);
ASSERT_EQ(NULL, false_label_);
true_label_ = &body;
false_label_ = loop_statement.break_target();
false_label_ = &exit;
ASSERT(stmt->cond()->context() == Expression::kTest);
Visit(stmt->cond());
true_label_ = NULL;
@@ -441,13 +368,55 @@ void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
__ CallStub(&stack_stub);
__ jmp(&stack_check_success);
__ bind(loop_statement.break_target());
__ bind(&exit);
decrement_loop_depth();
}
void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
UNREACHABLE();
Comment cmnt(masm_, "[ ForStatement");
Label test, body, exit, stack_limit_hit, stack_check_success;
if (stmt->init() != NULL) Visit(stmt->init());
increment_loop_depth();
// Emit the test at the bottom of the loop (even if empty).
__ jmp(&test);
__ bind(&body);
Visit(stmt->body());
// Check stack before looping.
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
if (stmt->next() != NULL) Visit(stmt->next());
__ bind(&test);
if (stmt->cond() == NULL) {
// For an empty test jump to the top of the loop.
__ jmp(&body);
} else {
// We are not in an expression context because we have been compiling
// statements. Set up a test expression context for the condition.
ASSERT_EQ(NULL, true_label_);
ASSERT_EQ(NULL, false_label_);
true_label_ = &body;
false_label_ = &exit;
ASSERT(stmt->cond()->context() == Expression::kTest);
Visit(stmt->cond());
true_label_ = NULL;
false_label_ = NULL;
}
__ bind(&stack_limit_hit);
StackCheckStub stack_stub;
__ CallStub(&stack_stub);
__ jmp(&stack_check_success);
__ bind(&exit);
decrement_loop_depth();
}
@@ -462,63 +431,7 @@ void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// Try finally is compiled by setting up a try-handler on the stack while
// executing the try body, and removing it again afterwards.
//
// The try-finally construct can enter the finally block in three ways:
// 1. By exiting the try-block normally. This removes the try-handler and
// calls the finally block code before continuing.
// 2. By exiting the try-block with a function-local control flow transfer
// (break/continue/return). The site of the, e.g., break removes the
// try handler and calls the finally block code before continuing
// its outward control transfer.
// 3. By exiting the try-block with a thrown exception.
// This can happen in nested function calls. It traverses the try-handler
// chain and consumes the try-handler entry before jumping to the
// handler code. The handler code then calls the finally-block before
// rethrowing the exception.
//
// The finally block must assume a return address on top of the stack
// (or in the link register on ARM chips) and a value (return value or
// exception) in the result register (rax/eax/r0), both of which must
// be preserved. The return address isn't GC-safe, so it should be
// cooked before GC.
Label finally_entry;
Label try_handler_setup;
// Setup the try-handler chain. Use a call to
// Jump to try-handler setup and try-block code. Use call to put try-handler
// address on stack.
__ Call(&try_handler_setup);
// Try handler code. Return address of call is pushed on handler stack.
{
// This code is only executed during stack-handler traversal when an
// exception is thrown. The exception is in the result register, which
// is retained by the finally block.
// Call the finally block and then rethrow the exception.
__ Call(&finally_entry);
ThrowException();
}
__ bind(&finally_entry);
{
// Finally block implementation.
EnterFinallyBlock();
Finally finally_block(this);
Visit(stmt->finally_block());
ExitFinallyBlock(); // Return to the calling code.
}
__ bind(&try_handler_setup);
{
// Setup try handler (stack pointer registers).
__ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
TryFinally try_block(this, &finally_entry);
VisitStatements(stmt->try_block()->statements());
__ PopTryHandler();
}
// Execute the finally block on the way out.
__ Call(&finally_entry);
UNREACHABLE();
}
@@ -587,79 +500,40 @@ void FastCodeGenerator::VisitLiteral(Literal* expr) {
void FastCodeGenerator::VisitAssignment(Assignment* expr) {
Comment cmnt(masm_, "[ Assignment");
ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
// Record source code position of the (possible) IC call.
SetSourcePosition(expr->position());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Expression* rhs = expr->value();
// Left-hand side can only be a property, a global or a (parameter or
// local) slot.
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
// In case of a property we use the uninitialized expression context
// of the key to detect a named property.
if (prop != NULL) {
assign_type = (prop->key()->context() == Expression::kUninitialized)
? NAMED_PROPERTY
: KEYED_PROPERTY;
}
// Evaluate LHS expression.
switch (assign_type) {
case VARIABLE:
// Nothing to do here.
break;
case NAMED_PROPERTY:
Visit(prop->obj());
ASSERT_EQ(Expression::kValue, prop->obj()->context());
break;
case KEYED_PROPERTY:
Visit(prop->obj());
ASSERT_EQ(Expression::kValue, prop->obj()->context());
if (var != NULL) {
Visit(rhs);
ASSERT_EQ(Expression::kValue, rhs->context());
EmitVariableAssignment(expr);
} else if (prop != NULL) {
// Assignment to a property.
Visit(prop->obj());
ASSERT_EQ(Expression::kValue, prop->obj()->context());
// Use the expression context of the key subexpression to detect whether
// we have decided to use a named or keyed IC.
if (prop->key()->context() == Expression::kUninitialized) {
ASSERT(prop->key()->AsLiteral() != NULL);
Visit(rhs);
ASSERT_EQ(Expression::kValue, rhs->context());
EmitNamedPropertyAssignment(expr);
} else {
Visit(prop->key());
ASSERT_EQ(Expression::kValue, prop->key()->context());
break;
}
// If we have a compound assignment: Get value of LHS expression and
// store it on top of the stack.
// Note: Relies on kValue context being 'stack'.
if (expr->is_compound()) {
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
Expression::kValue);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(prop, Expression::kValue);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(Expression::kValue);
break;
}
}
// Evaluate RHS expression.
Expression* rhs = expr->value();
ASSERT_EQ(Expression::kValue, rhs->context());
Visit(rhs);
// If we have a compound assignment: Apply operator.
if (expr->is_compound()) {
EmitCompoundAssignmentOp(expr->binary_op(), Expression::kValue);
}
// Store the value.
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr);
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
case KEYED_PROPERTY:
Visit(rhs);
ASSERT_EQ(Expression::kValue, rhs->context());
EmitKeyedPropertyAssignment(expr);
break;
}
} else {
UNREACHABLE();
}
}
@@ -674,20 +548,8 @@ void FastCodeGenerator::VisitThrow(Throw* expr) {
}
int FastCodeGenerator::TryFinally::Exit(int stack_depth) {
// The macros used here must preserve the result register.
__ Drop(stack_depth);
__ PopTryHandler();
__ Call(finally_entry_);
return 0;
}
int FastCodeGenerator::TryCatch::Exit(int stack_depth) {
// The macros used here must preserve the result register.
__ Drop(stack_depth);
__ PopTryHandler();
return 0;
void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
UNREACHABLE();
}

195
deps/v8/src/fast-codegen.h

@@ -35,8 +35,6 @@
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Fast code generator.
class FastCodeGenerator: public AstVisitor {
public:
@@ -45,7 +43,6 @@ class FastCodeGenerator: public AstVisitor {
function_(NULL),
script_(script),
is_eval_(is_eval),
nesting_stack_(NULL),
loop_depth_(0),
true_label_(NULL),
false_label_(NULL) {
@@ -58,159 +55,6 @@ class FastCodeGenerator: public AstVisitor {
void Generate(FunctionLiteral* fun);
private:
class Breakable;
class Iteration;
class TryCatch;
class TryFinally;
class Finally;
class ForIn;
class NestedStatement BASE_EMBEDDED {
public:
explicit NestedStatement(FastCodeGenerator* codegen) : codegen_(codegen) {
// Link into codegen's nesting stack.
previous_ = codegen->nesting_stack_;
codegen->nesting_stack_ = this;
}
virtual ~NestedStatement() {
// Unlink from codegen's nesting stack.
ASSERT_EQ(this, codegen_->nesting_stack_);
codegen_->nesting_stack_ = previous_;
}
virtual Breakable* AsBreakable() { return NULL; }
virtual Iteration* AsIteration() { return NULL; }
virtual TryCatch* AsTryCatch() { return NULL; }
virtual TryFinally* AsTryFinally() { return NULL; }
virtual Finally* AsFinally() { return NULL; }
virtual ForIn* AsForIn() { return NULL; }
virtual bool IsContinueTarget(Statement* target) { return false; }
virtual bool IsBreakTarget(Statement* target) { return false; }
// Generate code to leave the nested statement. This includes
// cleaning up any stack elements in use and restoring the
// stack to the expectations of the surrounding statements.
// Takes a number of stack elements currently on top of the
// nested statement's stack, and returns a number of stack
// elements left on top of the surrounding statement's stack.
// The generated code must preserve the result register (which
// contains the value in case of a return).
virtual int Exit(int stack_depth) {
// Default implementation for the case where there is
// nothing to clean up.
return stack_depth;
}
NestedStatement* outer() { return previous_; }
protected:
MacroAssembler* masm() { return codegen_->masm(); }
private:
FastCodeGenerator* codegen_;
NestedStatement* previous_;
DISALLOW_COPY_AND_ASSIGN(NestedStatement);
};
class Breakable : public NestedStatement {
public:
Breakable(FastCodeGenerator* codegen,
BreakableStatement* break_target)
: NestedStatement(codegen),
target_(break_target) {}
virtual ~Breakable() {}
virtual Breakable* AsBreakable() { return this; }
virtual bool IsBreakTarget(Statement* statement) {
return target_ == statement;
}
BreakableStatement* statement() { return target_; }
Label* break_target() { return &break_target_label_; }
private:
BreakableStatement* target_;
Label break_target_label_;
DISALLOW_COPY_AND_ASSIGN(Breakable);
};
class Iteration : public Breakable {
public:
Iteration(FastCodeGenerator* codegen,
IterationStatement* iteration_statement)
: Breakable(codegen, iteration_statement) {}
virtual ~Iteration() {}
virtual Iteration* AsIteration() { return this; }
virtual bool IsContinueTarget(Statement* statement) {
return this->statement() == statement;
}
Label* continue_target() { return &continue_target_label_; }
private:
Label continue_target_label_;
DISALLOW_COPY_AND_ASSIGN(Iteration);
};
// The environment inside the try block of a try/catch statement.
class TryCatch : public NestedStatement {
public:
explicit TryCatch(FastCodeGenerator* codegen, Label* catch_entry)
: NestedStatement(codegen), catch_entry_(catch_entry) { }
virtual ~TryCatch() {}
virtual TryCatch* AsTryCatch() { return this; }
Label* catch_entry() { return catch_entry_; }
virtual int Exit(int stack_depth);
private:
Label* catch_entry_;
DISALLOW_COPY_AND_ASSIGN(TryCatch);
};
// The environment inside the try block of a try/finally statement.
class TryFinally : public NestedStatement {
public:
explicit TryFinally(FastCodeGenerator* codegen, Label* finally_entry)
: NestedStatement(codegen), finally_entry_(finally_entry) { }
virtual ~TryFinally() {}
virtual TryFinally* AsTryFinally() { return this; }
Label* finally_entry() { return finally_entry_; }
virtual int Exit(int stack_depth);
private:
Label* finally_entry_;
DISALLOW_COPY_AND_ASSIGN(TryFinally);
};
// A FinallyEnvironment represents being inside a finally block.
// Abnormal termination of the finally block needs to clean up
// the block's parameters from the stack.
class Finally : public NestedStatement {
public:
explicit Finally(FastCodeGenerator* codegen) : NestedStatement(codegen) { }
virtual ~Finally() {}
virtual Finally* AsFinally() { return this; }
virtual int Exit(int stack_depth) {
return stack_depth + kFinallyStackElementCount;
}
private:
// Number of extra stack slots occupied during a finally block.
static const int kFinallyStackElementCount = 2;
DISALLOW_COPY_AND_ASSIGN(Finally);
};
// A ForInEnvironment represents being inside a for-in loop.
// Abnormal termination of the for-in block needs to clean up
// the block's temporary storage from the stack.
class ForIn : public Iteration {
public:
ForIn(FastCodeGenerator* codegen,
ForInStatement* statement)
: Iteration(codegen, statement) { }
virtual ~ForIn() {}
virtual ForIn* AsForIn() { return this; }
virtual int Exit(int stack_depth) {
return stack_depth + kForInStackElementCount;
}
private:
// TODO(lrn): Check that this value is correct when implementing
// for-in.
static const int kForInStackElementCount = 5;
DISALLOW_COPY_AND_ASSIGN(ForIn);
};
int SlotOffset(Slot* slot);
void Move(Expression::Context destination, Register source);
void Move(Expression::Context destination, Slot* source, Register scratch);
@@ -240,25 +84,10 @@ class FastCodeGenerator: public AstVisitor {
// Platform-specific code sequences for calls
void EmitCallWithStub(Call* expr);
void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
// Platform-specific code for loading variables.
void EmitVariableLoad(Variable* expr, Expression::Context context);
void EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info);
// Platform-specific support for compiling assignments.
// Load a value from a named property and push the result on the stack.
// The receiver is left on the stack by the IC.
void EmitNamedPropertyLoad(Property* expr, Expression::Context context);
// Load a value from a keyed property and push the result on the stack.
// The receiver and the key are left on the stack by the IC.
void EmitKeyedPropertyLoad(Expression::Context context);
// Apply the compound assignment operator. Expects both operands on top
// of the stack.
void EmitCompoundAssignmentOp(Token::Value op, Expression::Context context);
// Complete a variable assignment. The right-hand-side value is expected
// on top of the stack.
void EmitVariableAssignment(Assignment* expr);
@@ -276,12 +105,6 @@ class FastCodeGenerator: public AstVisitor {
void SetStatementPosition(Statement* stmt);
void SetSourcePosition(int pos);
// Non-local control flow support.
void EnterFinallyBlock();
void ExitFinallyBlock();
void ThrowException();
// Loop nesting counter.
int loop_depth() { return loop_depth_; }
void increment_loop_depth() { loop_depth_++; }
void decrement_loop_depth() {
@@ -289,22 +112,11 @@ class FastCodeGenerator: public AstVisitor {
loop_depth_--;
}
MacroAssembler* masm() { return masm_; }
static Register result_register();
static Register context_register();
// Set fields in the stack frame. Offsets are the frame pointer relative
// offsets defined in, e.g., StandardFrameConstants.
void StoreToFrameField(int frame_offset, Register value);
// Load a value from the current context. Indices are defined as an enum
// in v8::internal::Context.
void LoadContextField(Register dst, int context_index);
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
// Handles the shortcutted logical binary operations in VisitBinaryOperation.
void EmitLogicalOperation(BinaryOperation* expr);
@@ -313,14 +125,11 @@ class FastCodeGenerator: public AstVisitor {
Handle<Script> script_;
bool is_eval_;
Label return_label_;
NestedStatement* nesting_stack_;
int loop_depth_;
Label* true_label_;
Label* false_label_;
friend class NestedStatement;
DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
};

17
deps/v8/src/global-handles.cc

@@ -168,12 +168,6 @@ class GlobalHandles::Node : public Malloced {
if (first_deallocated()) {
first_deallocated()->set_next(head());
}
// Check that we are not passing a finalized external string to
// the callback.
ASSERT(!object_->IsExternalAsciiString() ||
ExternalAsciiString::cast(object_)->resource() != NULL);
ASSERT(!object_->IsExternalTwoByteString() ||
ExternalTwoByteString::cast(object_)->resource() != NULL);
// Leaving V8.
VMState state(EXTERNAL);
func(object, par);
@@ -442,15 +436,15 @@ void GlobalHandles::RecordStats(HeapStats* stats) {
*stats->near_death_global_handle_count = 0;
*stats->destroyed_global_handle_count = 0;
for (Node* current = head_; current != NULL; current = current->next()) {
*stats->global_handle_count += 1;
*stats->global_handle_count++;
if (current->state_ == Node::WEAK) {
*stats->weak_global_handle_count += 1;
*stats->weak_global_handle_count++;
} else if (current->state_ == Node::PENDING) {
*stats->pending_global_handle_count += 1;
*stats->pending_global_handle_count++;
} else if (current->state_ == Node::NEAR_DEATH) {
*stats->near_death_global_handle_count += 1;
*stats->near_death_global_handle_count++;
} else if (current->state_ == Node::DESTROYED) {
*stats->destroyed_global_handle_count += 1;
*stats->destroyed_global_handle_count++;
}
}
}
@@ -513,4 +507,5 @@ void GlobalHandles::RemoveObjectGroups() {
object_groups->Clear();
}
} } // namespace v8::internal

19
deps/v8/src/globals.h

@@ -145,14 +145,6 @@ const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
// Desired alignment for maps.
#if V8_HOST_ARCH_64_BIT
const intptr_t kMapAlignmentBits = kObjectAlignmentBits;
#else
const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
#endif
const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
const intptr_t kMapAlignmentMask = kMapAlignment - 1;
// Tag information for Failure.
const int kFailureTag = 3;
@ -182,11 +174,6 @@ const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
#endif
// Number of bits to represent the page size for paged spaces. The value of 13
// gives 8K bytes per page.
const int kPageSizeBits = 13;
// Constants relevant to double precision floating point numbers.
// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
@ -307,7 +294,7 @@ enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };
enum VisitMode { VISIT_ALL, VISIT_ONLY_STRONG };
// A CodeDesc describes a buffer holding instructions and relocation
@ -463,10 +450,6 @@ enum StateTag {
#define POINTER_SIZE_ALIGN(value) \
(((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
// MAP_SIZE_ALIGN returns the value aligned as a map pointer.
#define MAP_SIZE_ALIGN(value) \
(((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
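Both POINTER_SIZE_ALIGN and MAP_SIZE_ALIGN use the same round-up idiom: adding alignment-1 and masking with ~(alignment-1) rounds a value up to the next multiple of a power-of-two alignment. A small sketch under that power-of-two assumption (hypothetical helper name):

#include <cassert>
#include <cstdint>

inline intptr_t AlignUp(intptr_t value, intptr_t alignment) {
  const intptr_t mask = alignment - 1;  // e.g. kPointerAlignmentMask, kMapAlignmentMask
  return (value + mask) & ~mask;
}

int main() {
  assert(AlignUp(13, 8) == 16);  // (13 + 7) & ~7
  assert(AlignUp(16, 8) == 16);  // already-aligned values are unchanged
  return 0;
}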
// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't

69
deps/v8/src/heap-inl.h

@ -109,19 +109,6 @@ Object* Heap::NumberFromUint32(uint32_t value) {
}
void Heap::FinalizeExternalString(String* string) {
ASSERT(string->IsExternalString());
v8::String::ExternalStringResourceBase** resource_addr =
reinterpret_cast<v8::String::ExternalStringResourceBase**>(
reinterpret_cast<byte*>(string) +
ExternalString::kResourceOffset -
kHeapObjectTag);
delete *resource_addr;
// Clear the resource pointer in the string.
*resource_addr = NULL;
}
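The address arithmetic above reflects V8's pointer tagging: a HeapObject pointer carries kHeapObjectTag (1) in its low bit, so the raw address of a field is the tagged pointer plus the field offset minus the tag. A rough sketch of that computation (hypothetical helper, not the V8 API):

#include <cstdint>

const intptr_t kHeapObjectTag = 1;  // low bit set on heap object pointers

inline char* FieldAddress(void* tagged_object, int field_offset) {
  // Strip the tag and add the offset, mirroring the expression used in
  // FinalizeExternalString above.
  return reinterpret_cast<char*>(tagged_object) + field_offset - kHeapObjectTag;
}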
Object* Heap::AllocateRawMap() {
#ifdef DEBUG
Counters::objs_since_last_full.Increment();
@ -129,12 +116,6 @@ Object* Heap::AllocateRawMap() {
#endif
Object* result = map_space_->AllocateRaw(Map::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
#ifdef DEBUG
if (!result->IsFailure()) {
// Maps have their own alignment.
CHECK((OffsetFrom(result) & kMapAlignmentMask) == kHeapObjectTag);
}
#endif
return result;
}
@ -340,56 +321,6 @@ inline bool Heap::allow_allocation(bool new_state) {
#endif
void ExternalStringTable::AddString(String* string) {
ASSERT(string->IsExternalString());
if (Heap::InNewSpace(string)) {
new_space_strings_.Add(string);
} else {
old_space_strings_.Add(string);
}
}
void ExternalStringTable::Iterate(ObjectVisitor* v) {
if (!new_space_strings_.is_empty()) {
Object** start = &new_space_strings_[0];
v->VisitPointers(start, start + new_space_strings_.length());
}
if (!old_space_strings_.is_empty()) {
Object** start = &old_space_strings_[0];
v->VisitPointers(start, start + old_space_strings_.length());
}
}
// Verify() is inline to avoid ifdef-s around its calls in release
// mode.
void ExternalStringTable::Verify() {
#ifdef DEBUG
for (int i = 0; i < new_space_strings_.length(); ++i) {
ASSERT(Heap::InNewSpace(new_space_strings_[i]));
ASSERT(new_space_strings_[i] != Heap::raw_unchecked_null_value());
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
ASSERT(old_space_strings_[i] != Heap::raw_unchecked_null_value());
}
#endif
}
void ExternalStringTable::AddOldString(String* string) {
ASSERT(string->IsExternalString());
ASSERT(!Heap::InNewSpace(string));
old_space_strings_.Add(string);
}
void ExternalStringTable::ShrinkNewStrings(int position) {
new_space_strings_.Rewind(position);
Verify();
}
} } // namespace v8::internal
#endif // V8_HEAP_INL_H_

5
deps/v8/src/heap-profiler.cc

@ -667,9 +667,8 @@ void ProducerHeapProfile::Setup() {
can_log_ = true;
}
void ProducerHeapProfile::DoRecordJSObjectAllocation(Object* obj) {
ASSERT(FLAG_log_producers);
if (!can_log_) return;
void ProducerHeapProfile::RecordJSObjectAllocation(Object* obj) {
if (!can_log_ || !FLAG_log_producers) return;
int framesCount = 0;
for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
++framesCount;

6
deps/v8/src/heap-profiler.h

@ -261,12 +261,8 @@ class RetainerHeapProfile BASE_EMBEDDED {
class ProducerHeapProfile : public AllStatic {
public:
static void Setup();
static void RecordJSObjectAllocation(Object* obj) {
if (FLAG_log_producers) DoRecordJSObjectAllocation(obj);
}
static void RecordJSObjectAllocation(Object* obj);
private:
static void DoRecordJSObjectAllocation(Object* obj);
static bool can_log_;
};

159
deps/v8/src/heap.cc

@ -733,7 +733,7 @@ void Heap::Scavenge() {
ScavengeVisitor scavenge_visitor;
// Copy roots.
IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
IterateRoots(&scavenge_visitor, VISIT_ALL);
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
@ -753,63 +753,6 @@ void Heap::Scavenge() {
}
}
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
ScavengeExternalStringTable();
ASSERT(new_space_front == new_space_.top());
// Set age mark.
new_space_.set_age_mark(new_space_.top());
// Update how much has survived scavenge.
survived_since_last_expansion_ +=
(PromotedSpaceSize() - survived_watermark) + new_space_.Size();
LOG(ResourceEvent("scavenge", "end"));
gc_state_ = NOT_IN_GC;
}
void Heap::ScavengeExternalStringTable() {
ExternalStringTable::Verify();
if (ExternalStringTable::new_space_strings_.is_empty()) return;
Object** start = &ExternalStringTable::new_space_strings_[0];
Object** end = start + ExternalStringTable::new_space_strings_.length();
Object** last = start;
for (Object** p = start; p < end; ++p) {
ASSERT(Heap::InFromSpace(*p));
MapWord first_word = HeapObject::cast(*p)->map_word();
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
FinalizeExternalString(String::cast(*p));
continue;
}
// String is still reachable.
String* target = String::cast(first_word.ToForwardingAddress());
ASSERT(target->IsExternalString());
if (Heap::InNewSpace(target)) {
// String is still in new space. Update the table entry.
*last = target;
++last;
} else {
// String got promoted. Move it to the old string list.
ExternalStringTable::AddOldString(target);
}
}
ExternalStringTable::ShrinkNewStrings(last - start);
}
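The loop above is a standard in-place compaction: a read cursor scans new_space_strings_, a write cursor (last) keeps the entries that stay in new space, promoted strings are appended to the old list, unreachable ones are finalized, and the list is finally shrunk to the survivors. A generic sketch of the same pattern (hypothetical names, std::vector standing in for V8's List):

#include <vector>

template <typename T, typename Keep, typename Drop>
void CompactInPlace(std::vector<T>& items, Keep keep, Drop drop) {
  size_t last = 0;
  for (size_t i = 0; i < items.size(); ++i) {
    if (keep(items[i])) {
      items[last++] = items[i];  // survivor: slide it towards the front
    } else {
      drop(items[i]);            // e.g. finalize it or move it to another list
    }
  }
  items.resize(last);            // analogous to ShrinkNewStrings(last - start)
}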
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
Address new_space_front) {
do {
ASSERT(new_space_front <= new_space_.top());
@ -818,7 +761,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// queue is empty.
while (new_space_front < new_space_.top()) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
object->Iterate(scavenge_visitor);
object->Iterate(&scavenge_visitor);
new_space_front += object->Size();
}
@ -840,7 +783,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
RecordCopiedObject(target);
#endif
// Visit the newly copied object for pointers to new space.
target->Iterate(scavenge_visitor);
target->Iterate(&scavenge_visitor);
UpdateRSet(target);
}
@ -848,7 +791,16 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// (there are currently no more unswept promoted objects).
} while (new_space_front < new_space_.top());
return new_space_front;
// Set age mark.
new_space_.set_age_mark(new_space_.top());
// Update how much has survived scavenge.
survived_since_last_expansion_ +=
(PromotedSpaceSize() - survived_watermark) + new_space_.Size();
LOG(ResourceEvent("scavenge", "end"));
gc_state_ = NOT_IN_GC;
}
@ -1142,13 +1094,6 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(0);
// If the map object is aligned fill the padding area with Smi 0 objects.
if (Map::kPadStart < Map::kSize) {
memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
0,
Map::kSize - Map::kPadStart);
}
return map;
}
@ -2240,11 +2185,8 @@ Object* Heap::AllocateFunctionPrototype(JSFunction* function) {
Object* Heap::AllocateFunction(Map* function_map,
SharedFunctionInfo* shared,
Object* prototype,
PretenureFlag pretenure) {
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
Object* result = Allocate(function_map, space);
Object* prototype) {
Object* result = Allocate(function_map, OLD_POINTER_SPACE);
if (result->IsFailure()) return result;
return InitializeFunction(JSFunction::cast(result), shared, prototype);
}
@ -2261,14 +2203,10 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
JSObject* boilerplate =
Top::context()->global_context()->arguments_boilerplate();
// Check that the size of the boilerplate matches our
// expectations. The ArgumentsAccessStub::GenerateNewObject relies
// on the size being a known constant.
ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
// Do the allocation.
Object* result =
AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
// Make the clone.
Map* map = boilerplate->map();
int object_size = map->instance_size();
Object* result = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
if (result->IsFailure()) return result;
// Copy the content. The arguments boilerplate doesn't have any
@ -2276,7 +2214,7 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
// barrier here.
CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
reinterpret_cast<Object**>(boilerplate->address()),
kArgumentsObjectSize);
object_size);
// Set the two properties.
JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
@ -3237,11 +3175,6 @@ void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
v->Synchronize("symbol_table");
if (mode != VISIT_ALL_IN_SCAVENGE) {
// Scavenge collections have special processing for this.
ExternalStringTable::Iterate(v);
}
v->Synchronize("external_string_table");
}
@ -3270,12 +3203,11 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
HandleScopeImplementer::Iterate(v);
v->Synchronize("handlescope");
// Iterate over the builtin code objects and code stubs in the
// heap. Note that it is not necessary to iterate over code objects
// on scavenge collections.
if (mode != VISIT_ALL_IN_SCAVENGE) {
Builtins::IterateBuiltins(v);
}
// Iterate over the builtin code objects and code stubs in the heap. Note
// that it is not strictly necessary to iterate over code objects on
// scavenge collections. We still do it here because this same function
// is used by the mark-sweep collector and the deserializer.
Builtins::IterateBuiltins(v);
v->Synchronize("builtins");
// Iterate over global handles.
@ -3492,8 +3424,6 @@ void Heap::SetStackLimits() {
void Heap::TearDown() {
GlobalHandles::TearDown();
ExternalStringTable::TearDown();
new_space_.TearDown();
if (old_pointer_space_ != NULL) {
@ -3909,8 +3839,8 @@ class MarkRootVisitor: public ObjectVisitor {
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
search_target = target;
void Heap::TracePathToObject() {
search_target = NULL;
search_for_any_global = false;
MarkRootVisitor root_visitor;
@ -3977,8 +3907,8 @@ const char* GCTracer::CollectorString() {
int KeyedLookupCache::Hash(Map* map, String* name) {
// Uses only lower 32 bits if pointers are larger.
uintptr_t addr_hash =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
return (addr_hash ^ name->Hash()) & kCapacityMask;
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> 2;
return (addr_hash ^ name->Hash()) % kLength;
}
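The two reductions of the hash to a table index coincide here because kLength is 64, a power of two: for non-negative x, x % 64 equals x & 63, and the mask form avoids a division. A quick check of that equivalence:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kLength = 64;
  const uint32_t kCapacityMask = kLength - 1;
  for (uint32_t x = 0; x < 1000; ++x) {
    assert((x % kLength) == (x & kCapacityMask));
  }
  return 0;
}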
@ -4061,35 +3991,4 @@ void TranscendentalCache::Clear() {
}
void ExternalStringTable::CleanUp() {
int last = 0;
for (int i = 0; i < new_space_strings_.length(); ++i) {
if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
if (Heap::InNewSpace(new_space_strings_[i])) {
new_space_strings_[last++] = new_space_strings_[i];
} else {
old_space_strings_.Add(new_space_strings_[i]);
}
}
new_space_strings_.Rewind(last);
last = 0;
for (int i = 0; i < old_space_strings_.length(); ++i) {
if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
old_space_strings_[last++] = old_space_strings_[i];
}
old_space_strings_.Rewind(last);
Verify();
}
void ExternalStringTable::TearDown() {
new_space_strings_.Free();
old_space_strings_.Free();
}
List<Object*> ExternalStringTable::new_space_strings_;
List<Object*> ExternalStringTable::old_space_strings_;
} } // namespace v8::internal

69
deps/v8/src/heap.h

@ -487,12 +487,9 @@ class Heap : public AllStatic {
// Please note this does not perform a garbage collection.
static Object* AllocateFunction(Map* function_map,
SharedFunctionInfo* shared,
Object* prototype,
PretenureFlag pretenure = TENURED);
Object* prototype);
// Indices for direct access into argument objects.
static const int kArgumentsObjectSize =
JSObject::kHeaderSize + 2 * kPointerSize;
static const int arguments_callee_index = 0;
static const int arguments_length_index = 1;
@ -569,10 +566,6 @@ class Heap : public AllStatic {
static Object* AllocateExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource);
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
static inline void FinalizeExternalString(String* string);
// Allocates an uninitialized object. The memory is non-executable if the
// hardware and OS allow.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@ -785,7 +778,7 @@ class Heap : public AllStatic {
return disallow_allocation_failure_;
}
static void TracePathToObject(Object* target);
static void TracePathToObject();
static void TracePathToGlobal();
#endif
@ -893,7 +886,7 @@ class Heap : public AllStatic {
// The number of MapSpace pages is limited by the way we pack
// Map pointers during GC.
static const int kMaxMapSpaceSize =
(1 << (MapWord::kMapPageIndexBits)) * Page::kPageSize;
(1 << MapWord::kMapPageIndexBits) * Page::kPageSize;
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 512*KB;
@ -1046,9 +1039,6 @@ class Heap : public AllStatic {
// Performs a minor collection in new generation.
static void Scavenge();
static void ScavengeExternalStringTable();
static Address DoScavenge(ObjectVisitor* scavenge_visitor,
Address new_space_front);
// Performs a major collection in the whole heap.
static void MarkCompact(GCTracer* tracer);
@ -1303,35 +1293,19 @@ class KeyedLookupCache {
// Clear the cache.
static void Clear();
static const int kLength = 64;
static const int kCapacityMask = kLength - 1;
static const int kMapHashShift = 2;
private:
static inline int Hash(Map* map, String* name);
// Get the address of the keys and field_offsets arrays. Used in
// generated code to perform cache lookups.
static Address keys_address() {
return reinterpret_cast<Address>(&keys_);
}
static Address field_offsets_address() {
return reinterpret_cast<Address>(&field_offsets_);
}
static const int kLength = 64;
struct Key {
Map* map;
String* name;
};
static Key keys_[kLength];
static int field_offsets_[kLength];
friend class ExternalReference;
};
// Cache for mapping (array, property name) into descriptor index.
// The cache contains both positive and negative results.
// Descriptor index equals kNotFound means the property is absent.
@ -1649,39 +1623,6 @@ class TranscendentalCache {
};
// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
// finalize them.
class ExternalStringTable : public AllStatic {
public:
// Registers an external string.
inline static void AddString(String* string);
inline static void Iterate(ObjectVisitor* v);
// Restores internal invariant and gets rid of collected strings.
// Must be called after each Iterate() that modified the strings.
static void CleanUp();
// Destroys all allocated memory.
static void TearDown();
private:
friend class Heap;
inline static void Verify();
inline static void AddOldString(String* string);
// Notifies the table that only a prefix of the new list is valid.
inline static void ShrinkNewStrings(int position);
// To speed up scavenge collections new space strings are kept
// separate from old space strings.
static List<Object*> new_space_strings_;
static List<Object*> old_space_strings_;
};
} } // namespace v8::internal
#endif // V8_HEAP_H_

11
deps/v8/src/ia32/assembler-ia32.cc

@ -2004,17 +2004,6 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
}
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x57);
emit_sse_operand(dst, src);
}
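For reference, the bytes emitted above follow the SSE2 encoding 66 0F 57 /r: the 0x66 operand-size prefix turns the 0F 57 opcode (XORPS) into XORPD, and emit_sse_operand supplies the ModR/M byte. XOR-ing a register with itself is the common way to materialize +0.0, as the xorpd(xmm0, xmm0) use elsewhere in this diff does. A rough stand-alone sketch of the register-to-register form (not the V8 emitter):

#include <cstdint>
#include <vector>

void EmitXorpd(std::vector<uint8_t>* code, int dst, int src) {
  // dst and src are XMM register numbers 0..7.
  code->push_back(0x66);  // operand-size prefix: selects the packed-double form
  code->push_back(0x0F);
  code->push_back(0x57);  // XORPS/XORPD opcode
  code->push_back(static_cast<uint8_t>(0xC0 | (dst << 3) | src));  // ModR/M, reg-reg
}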
void Assembler::comisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);

1
deps/v8/src/ia32/assembler-ia32.h

@ -745,7 +745,6 @@ class Assembler : public Malloced {
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
void comisd(XMMRegister dst, XMMRegister src);

47
deps/v8/src/ia32/builtins-ia32.cc

@ -472,38 +472,35 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&done);
}
// 4. Check that the function really is a function.
{ Label done;
__ test(edi, Operand(edi));
__ j(not_zero, &done, taken);
__ xor_(ebx, Operand(ebx));
// CALL_NON_FUNCTION will expect to find the non-function callee on the
// expression stack of the caller. Transfer it from receiver to the
// caller's expression stack (and make the first argument the receiver
// for CALL_NON_FUNCTION) by decrementing the argument count.
__ dec(eax);
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
__ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
__ bind(&done);
}
// 5. Shift arguments and return address one slot down on the stack
// (overwriting the receiver).
// 4. Shift stuff one slot down the stack.
{ Label loop;
__ mov(ecx, eax);
__ lea(ecx, Operand(eax, +1)); // +1 ~ copy receiver too
__ bind(&loop);
__ mov(ebx, Operand(esp, ecx, times_4, 0));
__ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
__ dec(ecx);
__ j(not_sign, &loop);
__ pop(ebx); // Discard copy of return address.
__ dec(eax); // One fewer argument (first argument is new receiver).
__ j(not_zero, &loop);
}
// 6. Get the code to call from the function and check that the number of
// expected arguments matches what we're providing.
{ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
// 5. Remove TOS (copy of last arguments), but keep return address.
__ pop(ebx);
__ pop(ecx);
__ push(ebx);
__ dec(eax);
// 6. Check that function really was a function and get the code to
// call from the function and check that the number of expected
// arguments matches what we're providing.
{ Label invoke;
__ test(edi, Operand(edi));
__ j(not_zero, &invoke, taken);
__ xor_(ebx, Operand(ebx));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
__ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
__ bind(&invoke);
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));

458
deps/v8/src/ia32/codegen-ia32.cc

@ -174,19 +174,12 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
function_return_is_shadowed_ = false;
// Allocate the local context if needed.
int heap_slots = scope_->num_heap_slots();
if (heap_slots > 0) {
if (scope_->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
// Get outer context and create a new context based on it.
frame_->PushFunction();
Result context;
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
context = frame_->CallStub(&stub, 1);
} else {
context = frame_->CallRuntime(Runtime::kNewContext, 1);
}
Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
// Update context local.
frame_->SaveContextRegister();
@ -770,27 +763,19 @@ class FloatingPointHelper : public AllStatic {
const char* GenericBinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int len = 100;
name_ = Bootstrapper::AllocateAutoDeletedArray(len);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
case NO_OVERWRITE: overwrite_name = "Alloc"; break;
case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
OS::SNPrintF(Vector<char>(name_, len),
"GenericBinaryOpStub_%s_%s%s_%s%s",
op_name,
overwrite_name,
(flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
args_in_registers_ ? "RegArgs" : "StackArgs",
args_reversed_ ? "_R" : "");
return name_;
switch (op_) {
case Token::ADD: return "GenericBinaryOpStub_ADD";
case Token::SUB: return "GenericBinaryOpStub_SUB";
case Token::MUL: return "GenericBinaryOpStub_MUL";
case Token::DIV: return "GenericBinaryOpStub_DIV";
case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
case Token::SAR: return "GenericBinaryOpStub_SAR";
case Token::SHL: return "GenericBinaryOpStub_SHL";
case Token::SHR: return "GenericBinaryOpStub_SHR";
default: return "GenericBinaryOpStub";
}
}
@ -818,88 +803,14 @@ class DeferredInlineBinaryOperation: public DeferredCode {
void DeferredInlineBinaryOperation::Generate() {
Label done;
if (CpuFeatures::IsSupported(SSE2) && ((op_ == Token::ADD) ||
(op_ == Token::SUB) ||
(op_ == Token::MUL) ||
(op_ == Token::DIV))) {
CpuFeatures::Scope use_sse2(SSE2);
Label call_runtime, after_alloc_failure;
Label left_smi, right_smi, load_right, do_op;
__ test(left_, Immediate(kSmiTagMask));
__ j(zero, &left_smi);
__ cmp(FieldOperand(left_, HeapObject::kMapOffset),
Factory::heap_number_map());
__ j(not_equal, &call_runtime);
__ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
if (mode_ == OVERWRITE_LEFT) {
__ mov(dst_, left_);
}
__ jmp(&load_right);
__ bind(&left_smi);
__ sar(left_, 1);
__ cvtsi2sd(xmm0, Operand(left_));
__ shl(left_, 1);
if (mode_ == OVERWRITE_LEFT) {
Label alloc_failure;
__ push(left_);
__ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
__ pop(left_);
}
__ bind(&load_right);
__ test(right_, Immediate(kSmiTagMask));
__ j(zero, &right_smi);
__ cmp(FieldOperand(right_, HeapObject::kMapOffset),
Factory::heap_number_map());
__ j(not_equal, &call_runtime);
__ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
if (mode_ == OVERWRITE_RIGHT) {
__ mov(dst_, right_);
} else if (mode_ == NO_OVERWRITE) {
Label alloc_failure;
__ push(left_);
__ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
__ pop(left_);
}
__ jmp(&do_op);
__ bind(&right_smi);
__ sar(right_, 1);
__ cvtsi2sd(xmm1, Operand(right_));
__ shl(right_, 1);
if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
Label alloc_failure;
__ push(left_);
__ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
__ pop(left_);
}
__ bind(&do_op);
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
__ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
__ jmp(&done);
__ bind(&after_alloc_failure);
__ pop(left_);
__ bind(&call_runtime);
}
GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
stub.GenerateCall(masm_, left_, right_);
if (!dst_.is(eax)) __ mov(dst_, eax);
__ bind(&done);
}
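The sar/cvtsi2sd/shl sequences in the SSE2 path above rely on ia32 smi tagging: a smi stores its integer value shifted left by one with a zero tag bit, so an arithmetic shift right untags it before cvtsi2sd and a shift left re-tags it afterwards. A small sketch of that representation (assuming the usual arithmetic right shift for negative values, which is what sar provides):

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;

inline int32_t SmiTag(int32_t value) { return value * 2; }           // value << kSmiTagSize
inline int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }  // like the sar above

int main() {
  assert(SmiUntag(SmiTag(21)) == 21);
  assert(SmiUntag(SmiTag(-7)) == -7);
  return 0;
}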
void CodeGenerator::GenericBinaryOperation(Token::Value op,
StaticType* type,
SmiAnalysis* type,
OverwriteMode overwrite_mode) {
Comment cmnt(masm_, "[ BinaryOperation");
Comment cmnt_token(masm_, Token::String(op));
@ -1580,7 +1491,7 @@ void DeferredInlineSmiSub::Generate() {
void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> value,
StaticType* type,
SmiAnalysis* type,
bool reversed,
OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
@ -1865,8 +1776,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
}
void CodeGenerator::Comparison(AstNode* node,
Condition cc,
void CodeGenerator::Comparison(Condition cc,
bool strict,
ControlDestination* dest) {
// Strict only makes sense for equality comparisons.
@ -1913,8 +1823,7 @@ void CodeGenerator::Comparison(AstNode* node,
default:
UNREACHABLE();
}
} else {
// Only one side is a constant Smi.
} else { // Only one side is a constant Smi.
// If left side is a constant Smi, reverse the operands.
// Since one side is a constant Smi, conversion order does not matter.
if (left_side_constant_smi) {
@ -1928,8 +1837,6 @@ void CodeGenerator::Comparison(AstNode* node,
// Implement comparison against a constant Smi, inlining the case
// where both sides are Smis.
left_side.ToRegister();
Register left_reg = left_side.reg();
Handle<Object> right_val = right_side.handle();
// Here we split control flow to the stub call and inlined cases
// before finally splitting it to the control destination. We use
@ -1937,50 +1844,11 @@ void CodeGenerator::Comparison(AstNode* node,
// the first split. We manually handle the off-frame references
// by reconstituting them on the non-fall-through path.
JumpTarget is_smi;
Register left_reg = left_side.reg();
Handle<Object> right_val = right_side.handle();
__ test(left_side.reg(), Immediate(kSmiTagMask));
is_smi.Branch(zero, taken);
bool is_for_loop_compare = (node->AsCompareOperation() != NULL)
&& node->AsCompareOperation()->is_for_loop_condition();
if (!is_for_loop_compare
&& CpuFeatures::IsSupported(SSE2)
&& right_val->IsSmi()) {
// Right side is a constant smi and left side has been checked
// not to be a smi.
CpuFeatures::Scope use_sse2(SSE2);
JumpTarget not_number;
__ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
not_number.Branch(not_equal, &left_side);
__ movdbl(xmm1,
FieldOperand(left_reg, HeapNumber::kValueOffset));
int value = Smi::cast(*right_val)->value();
if (value == 0) {
__ xorpd(xmm0, xmm0);
} else {
Result temp = allocator()->Allocate();
__ mov(temp.reg(), Immediate(value));
__ cvtsi2sd(xmm0, Operand(temp.reg()));
temp.Unuse();
}
__ comisd(xmm1, xmm0);
// Jump to builtin for NaN.
not_number.Branch(parity_even, &left_side);
left_side.Unuse();
Condition double_cc = cc;
switch (cc) {
case less: double_cc = below; break;
case equal: double_cc = equal; break;
case less_equal: double_cc = below_equal; break;
case greater: double_cc = above; break;
case greater_equal: double_cc = above_equal; break;
default: UNREACHABLE();
}
dest->true_target()->Branch(double_cc);
dest->false_target()->Jump();
not_number.Bind(&left_side);
}
// Setup and call the compare stub.
CompareStub stub(cc, strict);
Result result = frame_->CallStub(&stub, &left_side, &right_side);
@ -2004,7 +1872,6 @@ void CodeGenerator::Comparison(AstNode* node,
right_side.Unuse();
dest->Split(cc);
}
} else if (cc == equal &&
(left_side_constant_null || right_side_constant_null)) {
// To make null checks efficient, we check if either the left side or
@ -2041,8 +1908,7 @@ void CodeGenerator::Comparison(AstNode* node,
operand.Unuse();
dest->Split(not_zero);
}
} else {
// Neither side is a constant Smi or null.
} else { // Neither side is a constant Smi or null.
// If either side is a non-smi constant, skip the smi check.
bool known_non_smi =
(left_side.is_constant() && !left_side.handle()->IsSmi()) ||
@ -2709,7 +2575,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
// Compare and branch to the body if true or the next test if
// false. Prefer the next test as a fall through.
ControlDestination dest(clause->body_target(), &next_test, false);
Comparison(node, equal, true, &dest);
Comparison(equal, true, &dest);
// If the comparison fell through to the true target, jump to the
// actual body.
@ -3719,28 +3585,18 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
// Call the runtime to instantiate the function boilerplate object.
// The inevitable call will sync frame elements to memory anyway, so
// we do it eagerly to allow us to push the arguments directly into
// place.
ASSERT(boilerplate->IsBoilerplate());
frame_->SyncRange(0, frame_->element_count() - 1);
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
FastNewClosureStub stub;
frame_->Push(boilerplate);
Result answer = frame_->CallStub(&stub, 1);
frame_->Push(&answer);
} else {
// Call the runtime to instantiate the function boilerplate
// object. The inevitable call will sync frame elements to memory
// anyway, so we do it eagerly to allow us to push the arguments
// directly into place.
frame_->SyncRange(0, frame_->element_count() - 1);
// Create a new closure.
frame_->EmitPush(esi);
frame_->EmitPush(Immediate(boilerplate));
Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
frame_->Push(&result);
}
// Create a new closure.
frame_->EmitPush(esi);
frame_->EmitPush(Immediate(boilerplate));
Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
frame_->Push(&result);
}
@ -4439,23 +4295,18 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
// Push the resulting array literal boilerplate on the stack.
frame_->Push(&boilerplate);
// Clone the boilerplate object.
int length = node->values()->length();
Result clone;
if (node->depth() == 1 &&
length <= FastCloneShallowArrayStub::kMaximumLength) {
FastCloneShallowArrayStub stub(length);
clone = frame_->CallStub(&stub, 1);
} else {
clone = frame_->CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
if (node->depth() == 1) {
clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
}
Result clone = frame_->CallRuntime(clone_function_id, 1);
// Push the newly cloned literal object as the result.
frame_->Push(&clone);
// Generate code to set the elements in the array that are not
// literals.
for (int i = 0; i < length; i++) {
for (int i = 0; i < node->values()->length(); i++) {
Expression* value = node->values()->at(i);
// If value is a literal the property value is already set in the
@ -4684,6 +4535,9 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
// Push the name of the function and the receiver onto the stack.
frame_->Push(var->name());
// Pass the global object as the receiver and let the IC stub
// patch the stack to use the global proxy as 'this' in the
// invoked function.
@ -4695,16 +4549,14 @@ void CodeGenerator::VisitCall(Call* node) {
Load(args->at(i));
}
// Push the name of the function onto the frame.
frame_->Push(var->name());
// Call the IC initialization code.
CodeForSourcePosition(node->position());
Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
arg_count,
loop_nesting());
frame_->RestoreContextRegister();
frame_->Push(&result);
// Replace the function on the stack with the result.
frame_->SetElementAt(0, &result);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
@ -4757,7 +4609,8 @@ void CodeGenerator::VisitCall(Call* node) {
node->position());
} else {
// Push the receiver onto the frame.
// Push the name of the function and the receiver onto the stack.
frame_->Push(name);
Load(property->obj());
// Load the arguments.
@ -4766,16 +4619,14 @@ void CodeGenerator::VisitCall(Call* node) {
Load(args->at(i));
}
// Push the name of the function onto the frame.
frame_->Push(name);
// Call the IC initialization code.
CodeForSourcePosition(node->position());
Result result =
frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
loop_nesting());
frame_->RestoreContextRegister();
frame_->Push(&result);
// Replace the function on the stack with the result.
frame_->SetElementAt(0, &result);
}
} else {
@ -5433,6 +5284,8 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
Runtime::Function* function = node->function();
if (function == NULL) {
// Prepare stack for calling JS runtime function.
frame_->Push(node->name());
// Push the builtins object found in the current global object.
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
@ -5449,12 +5302,11 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (function == NULL) {
// Call the JS runtime function.
frame_->Push(node->name());
Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
arg_count,
loop_nesting_);
frame_->RestoreContextRegister();
frame_->Push(&answer);
frame_->SetElementAt(0, &answer);
} else {
// Call the C runtime function.
Result answer = frame_->CallRuntime(function, arg_count);
@ -6122,7 +5974,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
Load(left);
Load(right);
Comparison(node, cc, strict, destination());
Comparison(cc, strict, destination());
}
@ -6576,7 +6428,7 @@ void Reference::SetValue(InitState init_state) {
// a loop and the key is likely to be a smi.
Property* property = expression()->AsProperty();
ASSERT(property != NULL);
StaticType* key_smi_analysis = property->key()->type();
SmiAnalysis* key_smi_analysis = property->key()->type();
if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
Comment cmnt(masm, "[ Inlined store to keyed Property");
@ -6677,133 +6529,6 @@ void Reference::SetValue(InitState init_state) {
}
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Clone the boilerplate in new space. Set the context to the
// current context in esi.
Label gc;
__ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
// Get the boilerplate function from the stack.
__ mov(edx, Operand(esp, 1 * kPointerSize));
// Compute the function map in the current global context and set that
// as the map of the allocated object.
__ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
__ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
__ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
// Clone the rest of the boilerplate fields. We don't have to update
// the write barrier because the allocated object is in new space.
for (int offset = kPointerSize;
offset < JSFunction::kSize;
offset += kPointerSize) {
if (offset == JSFunction::kContextOffset) {
__ mov(FieldOperand(eax, offset), esi);
} else {
__ mov(ebx, FieldOperand(edx, offset));
__ mov(FieldOperand(eax, offset), ebx);
}
}
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ pop(ecx); // Temporarily remove return address.
__ pop(edx);
__ push(esi);
__ push(edx);
__ push(ecx); // Restore return address.
__ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
}
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
__ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
eax, ebx, ecx, &gc, TAG_OBJECT);
// Get the function from the stack.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
// Setup the object header.
__ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
__ mov(FieldOperand(eax, Array::kLengthOffset), Immediate(length));
// Setup the fixed slots.
__ xor_(ebx, Operand(ebx)); // Set to NULL.
__ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
__ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
__ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
__ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
// Copy the global object from the surrounding context.
__ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
// Initialize the rest of the slots to undefined.
__ mov(ebx, Factory::undefined_value());
for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
__ mov(Operand(eax, Context::SlotOffset(i)), ebx);
}
// Return and remove the on-stack parameter.
__ mov(esi, Operand(eax));
__ ret(1 * kPointerSize);
// Need to collect. Call into runtime system.
__ bind(&gc);
__ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
}
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
int size = JSArray::kSize + elements_size;
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
Label gc;
__ AllocateInNewSpace(size, eax, ebx, ecx, &gc, TAG_OBJECT);
// Get the boilerplate from the stack.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
__ mov(ebx, FieldOperand(ecx, i));
__ mov(FieldOperand(eax, i), ebx);
}
}
if (length_ > 0) {
// Get hold of the elements array of the boilerplate and setup the
// elements pointer in the resulting object.
__ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
__ lea(edx, Operand(eax, JSArray::kSize));
__ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
// Copy the elements array.
for (int i = 0; i < elements_size; i += kPointerSize) {
__ mov(ebx, FieldOperand(ecx, i));
__ mov(FieldOperand(edx, i), ebx);
}
}
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
__ bind(&gc);
ExternalReference runtime(Runtime::kCloneShallowLiteralBoilerplate);
__ TailCallRuntime(runtime, 1, 1);
}
// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result, true_result, not_string;
@ -7716,90 +7441,18 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
static const int kDisplacement = 2 * kPointerSize;
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
Label runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
__ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame);
// Get the length from the frame.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
__ jmp(&try_allocate);
__ j(not_equal, &runtime);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ mov(Operand(esp, 1 * kPointerSize), ecx);
__ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
__ mov(Operand(esp, 2 * kPointerSize), edx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
__ test(ecx, Operand(ecx));
__ j(zero, &add_arguments_object);
__ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
__ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize));
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
// Get the arguments boilerplate from the current (global) context.
int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
__ mov(edi, Operand(edi, offset));
// Copy the JS object part.
for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
__ mov(ebx, FieldOperand(edi, i));
__ mov(FieldOperand(eax, i), ebx);
}
// Setup the callee in-object property.
ASSERT(Heap::arguments_callee_index == 0);
__ mov(ebx, Operand(esp, 3 * kPointerSize));
__ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
// Get the length (smi tagged) and set that as an in-object property too.
ASSERT(Heap::arguments_length_index == 1);
__ mov(ecx, Operand(esp, 1 * kPointerSize));
__ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
// If there are no actual arguments, we're done.
Label done;
__ test(ecx, Operand(ecx));
__ j(zero, &done);
// Get the parameters pointer from the stack and untag the length.
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ sar(ecx, kSmiTagSize);
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(Factory::fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
// Copy the fixed array slots.
Label loop;
__ bind(&loop);
__ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
__ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
__ add(Operand(edi), Immediate(kPointerSize));
__ sub(Operand(edx), Immediate(kPointerSize));
__ dec(ecx);
__ test(ecx, Operand(ecx));
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
__ bind(&done);
__ ret(3 * kPointerSize);
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
@ -8653,7 +8306,6 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
__ and_(ecx, Operand(edi));
ASSERT(kStringEncodingMask == kAsciiStringTag);
__ test(ecx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii);
// Allocate an ascii cons string.
@ -8696,7 +8348,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Label non_ascii_string_add_flat_result;
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
ASSERT(kStringEncodingMask == kAsciiStringTag);
ASSERT(kAsciiStringTag != 0);
__ test(ecx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii_string_add_flat_result);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));

25
deps/v8/src/ia32/codegen-ia32.h

@ -434,7 +434,7 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(
Token::Value op,
StaticType* type,
SmiAnalysis* type,
OverwriteMode overwrite_mode);
// If possible, combine two constant smi values using op to produce
@ -447,7 +447,7 @@ class CodeGenerator: public AstVisitor {
void ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> constant_operand,
StaticType* type,
SmiAnalysis* type,
bool reversed,
OverwriteMode overwrite_mode);
@ -459,8 +459,7 @@ class CodeGenerator: public AstVisitor {
Result* right,
OverwriteMode overwrite_mode);
void Comparison(AstNode* node,
Condition cc,
void Comparison(Condition cc,
bool strict,
ControlDestination* destination);
@ -666,8 +665,7 @@ class GenericBinaryOpStub: public CodeStub {
mode_(mode),
flags_(flags),
args_in_registers_(false),
args_reversed_(false),
name_(NULL) {
args_reversed_(false) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@ -686,7 +684,6 @@ class GenericBinaryOpStub: public CodeStub {
bool args_in_registers_; // Arguments passed in registers not on the stack.
bool args_reversed_; // Left and right argument are swapped.
bool use_sse3_;
char* name_;
const char* GetName();
@ -728,8 +725,8 @@ class GenericBinaryOpStub: public CodeStub {
bool ArgsInRegistersSupported() {
return ((op_ == Token::ADD) || (op_ == Token::SUB)
|| (op_ == Token::MUL) || (op_ == Token::DIV))
&& flags_ != NO_SMI_CODE_IN_STUB;
|| (op_ == Token::MUL) || (op_ == Token::DIV))
&& flags_ != NO_SMI_CODE_IN_STUB;
}
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
@ -763,11 +760,11 @@ class StringAddStub: public CodeStub {
void Generate(MacroAssembler* masm);
void GenerateCopyCharacters(MacroAssembler* masm,
Register desc,
Register src,
Register count,
Register scratch,
bool ascii);
Register desc,
Register src,
Register count,
Register scratch,
bool ascii);
// Should the stub check whether arguments are strings?
bool string_check_;

8
deps/v8/src/ia32/disasm-ia32.cc

@ -1049,14 +1049,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x57) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("xorpd %s,%s",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else {
UnimplementedInstruction();
}

381
deps/v8/src/ia32/fast-codegen-ia32.cc

@ -412,24 +412,46 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
Slot* slot = var->slot();
Property* prop = var->AsProperty();
if (slot != NULL) {
switch (slot->type()) {
case Slot::PARAMETER: // Fall through.
case Slot::LOCAL:
if (decl->mode() == Variable::CONST) {
__ mov(Operand(ebp, SlotOffset(var->slot())),
Immediate(Factory::the_hole_value()));
} else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(Operand(ebp, SlotOffset(var->slot())));
}
break;
case Slot::CONTEXT:
// The variable in the decl always resides in the current context.
ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
ASSERT(slot != NULL); // No global declarations here.
// We have 3 cases for slots: LOOKUP, LOCAL, CONTEXT.
switch (slot->type()) {
case Slot::LOOKUP: {
__ push(esi);
__ push(Immediate(var->name()));
// Declaration nodes are always introduced in one of two modes.
ASSERT(decl->mode() == Variable::VAR || decl->mode() == Variable::CONST);
PropertyAttributes attr =
(decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
__ push(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (decl->mode() == Variable::CONST) {
__ push(Immediate(Factory::the_hole_value()));
} else if (decl->fun() != NULL) {
Visit(decl->fun());
} else {
__ push(Immediate(Smi::FromInt(0))); // No initial value!
}
__ CallRuntime(Runtime::kDeclareContextSlot, 4);
break;
}
case Slot::LOCAL:
if (decl->mode() == Variable::CONST) {
__ mov(Operand(ebp, SlotOffset(var->slot())),
Immediate(Factory::the_hole_value()));
} else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(Operand(ebp, SlotOffset(var->slot())));
}
break;
case Slot::CONTEXT:
// The variable in the decl always resides in the current context.
ASSERT(function_->scope()->ContextChainLength(slot->var()->scope()) == 0);
if (decl->mode() == Variable::CONST) {
__ mov(eax, Immediate(Factory::the_hole_value()));
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
__ mov(ebx,
@ -437,70 +459,26 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
__ cmp(ebx, Operand(esi));
__ Check(equal, "Unexpected declaration in current context.");
}
if (decl->mode() == Variable::CONST) {
__ mov(eax, Immediate(Factory::the_hole_value()));
__ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
// No write barrier since the hole value is in old space.
} else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(eax);
__ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
int offset = Context::SlotOffset(slot->index());
__ RecordWrite(esi, offset, eax, ecx);
}
break;
case Slot::LOOKUP: {
__ push(esi);
__ push(Immediate(var->name()));
// Declaration nodes are always introduced in one of two modes.
ASSERT(decl->mode() == Variable::VAR ||
decl->mode() == Variable::CONST);
PropertyAttributes attr =
(decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
__ push(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (decl->mode() == Variable::CONST) {
__ push(Immediate(Factory::the_hole_value()));
} else if (decl->fun() != NULL) {
Visit(decl->fun());
} else {
__ push(Immediate(Smi::FromInt(0))); // No initial value!
}
__ CallRuntime(Runtime::kDeclareContextSlot, 4);
break;
}
}
} else if (prop != NULL) {
if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
ASSERT_EQ(Expression::kValue, prop->obj()->context());
Visit(prop->obj());
ASSERT_EQ(Expression::kValue, prop->key()->context());
Visit(prop->key());
if (decl->fun() != NULL) {
ASSERT_EQ(Expression::kValue, decl->fun()->context());
__ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
// No write barrier since the_hole_value is in old space.
ASSERT(!Heap::InNewSpace(*Factory::the_hole_value()));
} else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(eax);
} else {
__ Set(eax, Immediate(Factory::the_hole_value()));
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
__ mov(ebx,
CodeGenerator::ContextOperand(esi, Context::FCONTEXT_INDEX));
__ cmp(ebx, Operand(esi));
__ Check(equal, "Unexpected declaration in current context.");
}
__ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
int offset = Context::SlotOffset(slot->index());
__ RecordWrite(esi, offset, eax, ecx);
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// Absence of a test eax instruction following the call
// indicates that none of the load was inlined.
// Value in eax is ignored (declarations are statements). Receiver
// and key on stack are discarded.
__ add(Operand(esp), Immediate(2 * kPointerSize));
}
break;
default:
UNREACHABLE();
}
}
@ -515,6 +493,20 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
Expression* expr = stmt->expression();
if (expr->AsLiteral() != NULL) {
__ mov(eax, expr->AsLiteral()->handle());
} else {
ASSERT_EQ(Expression::kValue, expr->context());
Visit(expr);
__ pop(eax);
}
EmitReturnSequence(stmt->statement_pos());
}
void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
@ -535,20 +527,14 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr->var(), expr->context());
}
void FastCodeGenerator::EmitVariableLoad(Variable* var,
Expression::Context context) {
Expression* rewrite = var->rewrite();
Expression* rewrite = expr->var()->rewrite();
if (rewrite == NULL) {
ASSERT(var->is_global());
ASSERT(expr->var()->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object on the stack.
__ push(CodeGenerator::GlobalObject());
__ mov(ecx, var->name());
__ mov(ecx, expr->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
// By emitting a nop we make sure that we do not have a test eax
@ -556,7 +542,8 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// Remember that the assembler may choose to do peephole optimization
// (eg, push/pop elimination).
__ nop();
DropAndMove(context, eax);
DropAndMove(expr->context(), eax);
} else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
if (FLAG_debug_code) {
@ -577,7 +564,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
UNREACHABLE();
}
}
Move(context, slot, eax);
Move(expr->context(), slot, eax);
} else {
Comment cmnt(masm_, "Variable rewritten to Property");
// A variable has been rewritten into an explicit access to
@ -611,8 +598,9 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// Notice: We must not have a "test eax, ..." instruction after
// the call. It is treated specially by the LoadIC code.
__ nop();
// Drop key and object left on the stack by IC.
DropAndMove(context, eax, 2);
// Drop key and object left on the stack by IC, and push the result.
DropAndMove(expr->context(), eax, 2);
}
}
@ -646,14 +634,35 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Label exists;
// Registers will be used as follows:
// edi = JS function.
// ebx = literals array.
// eax = boilerplate
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
__ mov(eax, FieldOperand(ebx, literal_offset));
__ cmp(eax, Factory::undefined_value());
__ j(not_equal, &exists);
// Create boilerplate if it does not exist.
// Literal array (0).
__ push(ebx);
// Literal index (1).
__ push(Immediate(Smi::FromInt(expr->literal_index())));
// Constant properties (2).
__ push(Immediate(expr->constant_properties()));
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 3);
__ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
__ bind(&exists);
// eax contains boilerplate.
// Clone boilerplate.
__ push(eax);
if (expr->depth() == 1) {
__ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
} else {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
__ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
}
// If result_saved == true: The result is saved on top of the
@ -749,14 +758,31 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
Label make_clone;
// Fetch the function's literals array.
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ mov(ebx, FieldOperand(ebx, JSFunction::kLiteralsOffset));
// Check if the literal's boilerplate has been instantiated.
int offset =
FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
__ mov(eax, FieldOperand(ebx, offset));
__ cmp(eax, Factory::undefined_value());
__ j(not_equal, &make_clone);
// Instantiate the boilerplate.
__ push(ebx);
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(expr->literals()));
__ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
__ bind(&make_clone);
// Clone the boilerplate.
__ push(eax);
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
__ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
} else {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
__ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
}
bool result_saved = false; // Is the result saved to the stack?
@ -826,37 +852,10 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
Expression::Context context) {
Literal* key = prop->key()->AsLiteral();
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
Move(context, eax);
}
void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
Move(context, eax);
}
void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
Expression::Context context) {
GenericBinaryOpStub stub(op,
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
__ CallStub(&stub);
Move(context, eax);
}
void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
if (var->is_global()) {
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in eax, variable name in
@ -961,6 +960,35 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
UNREACHABLE();
break;
}
} else {
Property* property = var->rewrite()->AsProperty();
ASSERT_NOT_NULL(property);
// Load object and key onto the stack.
Slot* object_slot = property->obj()->AsSlot();
ASSERT_NOT_NULL(object_slot);
Move(Expression::kValue, object_slot, eax);
Literal* key_literal = property->key()->AsLiteral();
ASSERT_NOT_NULL(key_literal);
Move(Expression::kValue, key_literal);
// Value to store was pushed before object and key on the stack.
__ mov(eax, Operand(esp, 2 * kPointerSize));
// Arguments to the IC are the value in eax and the object and key on the stack.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
if (expr->context() == Expression::kEffect) {
__ add(Operand(esp), Immediate(3 * kPointerSize));
} else if (expr->context() == Expression::kValue) {
// Value is still on the stack in esp[2 * kPointerSize]
__ add(Operand(esp), Immediate(2 * kPointerSize));
} else {
__ mov(eax, Operand(esp, 2 * kPointerSize));
DropAndMove(expr->context(), eax, 3);
}
}
}
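Editor's note: for readers tracking the esp arithmetic above, this is the stack picture the emitted code relies on just before the KeyedStoreIC call, reconstructed from the pushes in the hunk (a sketch, not authoritative):

//   esp[0 * kPointerSize] : key        (pushed last)
//   esp[1 * kPointerSize] : receiver   (the object)
//   esp[2 * kPointerSize] : value      (pushed first; also copied into eax)
//
// After the IC returns:
//   kEffect  -> add esp, 3 * kPointerSize   (discard all three slots)
//   kValue   -> add esp, 2 * kPointerSize   (leave the value slot on top)
//   others   -> reload the value from esp[2 * kPointerSize], then
//               DropAndMove(context, eax, 3) discards the three slots.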
@ -1066,9 +1094,7 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
}
void FastCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> name,
RelocInfo::Mode mode) {
void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@ -1076,15 +1102,16 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr,
Visit(args->at(i));
ASSERT_EQ(Expression::kValue, args->at(i)->context());
}
__ Set(ecx, Immediate(name));
// Record source position of the IC call.
// Record source position for debugger.
SetSourcePosition(expr->position());
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
__ call(ic, mode);
// Call the IC initialization code.
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
__ call(ic, reloc_info);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
Move(expr->context(), eax);
// Discard the function left on TOS.
DropAndMove(expr->context(), eax);
}
@ -1101,6 +1128,7 @@ void FastCodeGenerator::EmitCallWithStub(Call* expr) {
__ CallStub(&stub);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
DropAndMove(expr->context(), eax);
}
@ -1114,9 +1142,11 @@ void FastCodeGenerator::VisitCall(Call* expr) {
// Call to the identifier 'eval'.
UNREACHABLE();
} else if (var != NULL && !var->is_this() && var->is_global()) {
// Push global object as receiver for the call IC.
// Call to a global variable.
__ push(Immediate(var->name()));
// Push global object as receiver for the call IC lookup.
__ push(CodeGenerator::GlobalObject());
EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// Call to a lookup slot.
@ -1127,8 +1157,9 @@ void FastCodeGenerator::VisitCall(Call* expr) {
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsSymbol()) {
// Call to a named property, use call IC.
__ push(Immediate(key->handle()));
Visit(prop->obj());
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
@ -1220,6 +1251,7 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Prepare for calling JS runtime function.
__ push(Immediate(expr->name()));
__ mov(eax, CodeGenerator::GlobalObject());
__ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
}
@ -1232,18 +1264,19 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
if (expr->is_jsruntime()) {
// Call the JS runtime function via a call IC.
__ Set(ecx, Immediate(expr->name()));
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
// Call the JS runtime function.
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
__ call(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
DropAndMove(expr->context(), eax);
} else {
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
Move(expr->context(), eax);
}
Move(expr->context(), eax);
}
@ -1652,65 +1685,7 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
Move(expr->context(), eax);
}
Register FastCodeGenerator::result_register() { return eax; }
Register FastCodeGenerator::context_register() { return esi; }
void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ mov(Operand(ebp, frame_offset), value);
}
void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
__ mov(dst, CodeGenerator::ContextOperand(esi, context_index));
}
// ----------------------------------------------------------------------------
// Non-local control flow support.
void FastCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx));
__ mov(edx, Operand(esp, 0));
__ sub(Operand(edx), Immediate(masm_->CodeObject()));
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
ASSERT_EQ(0, kSmiTag);
__ add(edx, Operand(edx)); // Convert to smi.
__ mov(Operand(esp, 0), edx);
// Store result register while executing finally block.
__ push(result_register());
}
void FastCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(edx));
// Restore result register from stack.
__ pop(result_register());
// Uncook return address.
__ mov(edx, Operand(esp, 0));
__ sar(edx, 1); // Convert smi to int.
__ add(Operand(edx), Immediate(masm_->CodeObject()));
__ mov(Operand(esp, 0), edx);
// And return.
__ ret(0);
}
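Editor's note: a worked example of the cook/uncook arithmetic above, assuming the 32-bit smi encoding the code asserts (kSmiTagSize + kSmiShiftSize == 1, kSmiTag == 0) and using made-up addresses:

#include <cstdint>

// Sketch of EnterFinallyBlock: store the return address as a code-relative
// offset, smi-encoded, so the GC can move the code object safely.
uintptr_t Cook(uintptr_t ret_addr, uintptr_t code_start) {
  return (ret_addr - code_start) << 1;   // offset doubled; smi tag bit is 0
}

// Sketch of ExitFinallyBlock: undo the encoding before returning.
uintptr_t Uncook(uintptr_t cooked, uintptr_t code_start) {
  return (cooked >> 1) + code_start;     // smi-decode, then rebase
}

// e.g. Cook(0x40001234, 0x40001000) == 0x468
//      Uncook(0x468, 0x40001000)    == 0x40001234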
void FastCodeGenerator::ThrowException() {
__ push(result_register());
__ CallRuntime(Runtime::kThrow, 1);
}
#undef __
} } // namespace v8::internal

165
deps/v8/src/ia32/ic-ia32.cc

@ -48,13 +48,9 @@ namespace internal {
// must always call a backup property load that is complete.
// This function is safe to call if the receiver has fast properties,
// or if name is not a symbol, and will jump to the miss_label in that case.
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss_label,
Register r0,
Register r1,
Register r2,
Register name,
DictionaryCheck check_dictionary) {
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
Register r0, Register r1, Register r2,
Register name) {
// Register use:
//
// r0 - used to hold the property dictionary.
@ -90,15 +86,11 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
__ cmp(r0, JS_BUILTINS_OBJECT_TYPE);
__ j(equal, miss_label, not_taken);
// Load properties array.
__ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
if (check_dictionary == CHECK_DICTIONARY) {
__ cmp(FieldOperand(r0, HeapObject::kMapOffset),
Immediate(Factory::hash_table_map()));
__ j(not_equal, miss_label);
}
__ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
__ cmp(FieldOperand(r0, HeapObject::kMapOffset),
Immediate(Factory::hash_table_map()));
__ j(not_equal, miss_label);
// Compute the capacity mask.
const int kCapacityOffset =
@ -231,8 +223,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- esp[4] : name
// -- esp[8] : receiver
// -----------------------------------
Label slow, check_string, index_int, index_string;
Label check_pixel_array, probe_dictionary;
Label slow, check_string, index_int, index_string, check_pixel_array;
// Load name and receiver.
__ mov(eax, Operand(esp, kPointerSize));
@ -311,72 +302,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ test(ebx, Immediate(String::kIsArrayIndexMask));
__ j(not_zero, &index_string, not_taken);
// Is the string a symbol?
// If the string is a symbol, do a quick inline probe of the receiver's
// dictionary, if it exists.
__ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
__ test(ebx, Immediate(kIsSymbolMask));
__ j(zero, &slow, not_taken);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in ecx.
__ mov(ebx, FieldOperand(ecx, JSObject::kPropertiesOffset));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::hash_table_map()));
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
__ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
__ mov(edx, ebx);
__ shr(edx, KeyedLookupCache::kMapHashShift);
__ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
__ shr(eax, String::kHashShift);
__ xor_(edx, Operand(eax));
__ and_(edx, KeyedLookupCache::kCapacityMask);
// Load the key (consisting of map and symbol) from the cache and
// check for match.
ExternalReference cache_keys
= ExternalReference::keyed_lookup_cache_keys();
__ mov(edi, edx);
__ shl(edi, kPointerSizeLog2 + 1);
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
__ add(Operand(edi), Immediate(kPointerSize));
__ mov(edi, Operand::StaticArray(edi, times_1, cache_keys));
__ cmp(edi, Operand(esp, kPointerSize));
__ j(not_equal, &slow);
// Get field offset and check that it is an in-object property.
ExternalReference cache_field_offsets
= ExternalReference::keyed_lookup_cache_field_offsets();
__ mov(eax,
Operand::StaticArray(edx, times_pointer_size, cache_field_offsets));
__ movzx_b(edx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
__ cmp(eax, Operand(edx));
__ j(above_equal, &slow);
// Load in-object property.
__ sub(eax, Operand(edx));
__ movzx_b(edx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ add(eax, Operand(edx));
__ mov(eax, FieldOperand(ecx, eax, times_pointer_size, 0));
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
GenerateDictionaryLoad(masm,
&slow,
ebx,
ecx,
edx,
eax,
DICTIONARY_CHECK_DONE);
// Probe the dictionary leaving result in ecx.
GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax);
GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
__ mov(eax, Operand(ecx));
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
// If the hash field contains an array index pick it out. The assert checks
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it does not
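Editor's note: the block deleted above (part of the now-reverted upgrade) implemented a direct-mapped cache keyed on (receiver map, symbol) that short-circuits keyed loads of in-object properties. A C++ sketch of the same idea follows; the table size, hash mixing, and names here are made up and are not the real KeyedLookupCache interface.

#include <cstdint>

struct KeyedLookupCacheSketch {
  static const int kCapacity = 64;                   // must be a power of two
  struct Key { const void* map; const void* name; };
  Key keys_[kCapacity] = {};
  int field_offsets_[kCapacity] = {};

  static uint32_t Hash(const void* map, uint32_t name_hash) {
    // Mix some map-pointer bits with the string hash, as the deleted code did.
    return (static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> 3) ^ name_hash;
  }

  // Returns the cached in-object field index, or -1 to fall through to the
  // generic (slow) lookup path.
  int Probe(const void* map, const void* name, uint32_t name_hash) const {
    uint32_t index = Hash(map, name_hash) & (kCapacity - 1);
    if (keys_[index].map == map && keys_[index].name == name) {
      return field_offsets_[index];
    }
    return -1;
  }
};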
@ -888,16 +824,13 @@ Object* CallIC_Miss(Arguments args);
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Get the name of the function from the stack; 2 ~ return address, receiver
__ mov(ecx, Operand(esp, (argc + 2) * kPointerSize));
// Probe the stub cache.
Code::Flags flags =
@ -943,7 +876,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm, argc);
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
@ -951,34 +884,27 @@ static void GenerateNormalHelper(MacroAssembler* masm,
int argc,
bool is_global_object,
Label* miss) {
// ----------- S t a t e -------------
// -- ecx : name
// -- edx : receiver
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
// Search dictionary - put result in register edi.
__ mov(edi, edx);
GenerateDictionaryLoad(masm, miss, eax, edi, ebx, ecx, CHECK_DICTIONARY);
// Search dictionary - put result in register edx.
GenerateDictionaryLoad(masm, miss, eax, edx, ebx, ecx);
// Check that the result is not a smi.
__ test(edi, Immediate(kSmiTagMask));
// Move the result to register edi and check that it isn't a smi.
__ mov(edi, Operand(edx));
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
// Check that the value is a JavaScript function, fetching its map into eax.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
// Check that the value is a JavaScript function.
__ CmpObjectType(edx, JS_FUNCTION_TYPE, edx);
__ j(not_equal, miss, not_taken);
// Check that the function has been loaded. eax holds function's map.
__ mov(eax, FieldOperand(eax, Map::kBitField2Offset));
__ test(eax, Immediate(1 << Map::kNeedsLoading));
// Check that the function has been loaded.
__ mov(edx, FieldOperand(edi, JSFunction::kMapOffset));
__ mov(edx, FieldOperand(edx, Map::kBitField2Offset));
__ test(edx, Immediate(1 << Map::kNeedsLoading));
__ j(not_zero, miss, not_taken);
// Patch the receiver on stack with the global proxy if necessary.
// Patch the receiver with the global proxy if necessary.
if (is_global_object) {
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
__ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
__ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
}
@ -991,17 +917,14 @@ static void GenerateNormalHelper(MacroAssembler* masm,
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Get the name of the function from the stack; 2 ~ return address, receiver.
__ mov(ecx, Operand(esp, (argc + 2) * kPointerSize));
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
@ -1050,33 +973,33 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm, argc);
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
void CallIC::Generate(MacroAssembler* masm,
int argc,
const ExternalReference& f) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Get the name of the function to call from the stack.
// 2 ~ receiver, return address.
__ mov(ebx, Operand(esp, (argc + 2) * kPointerSize));
// Enter an internal frame.
__ EnterInternalFrame();
// Push the receiver and the name of the function.
__ push(edx);
__ push(ecx);
__ push(ebx);
// Call the entry.
CEntryStub stub(1);
__ mov(eax, Immediate(2));
__ mov(ebx, Immediate(ExternalReference(IC_Utility(kCallIC_Miss))));
__ mov(ebx, Immediate(f));
__ CallStub(&stub);
// Move result to edi and exit the internal frame.
@ -1088,11 +1011,11 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &invoke, not_taken);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, JS_GLOBAL_OBJECT_TYPE);
__ j(equal, &global);
__ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
__ cmp(ecx, JS_BUILTINS_OBJECT_TYPE);
__ j(not_equal, &invoke);
// Patch the receiver on the stack.
@ -1165,7 +1088,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Search the dictionary placing the result in eax.
__ bind(&probe);
GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx, CHECK_DICTIONARY);
GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx);
GenerateCheckNonObjectOrLoaded(masm, &miss, eax, edx);
__ ret(0);

48
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -504,13 +504,6 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
}
void MacroAssembler::PopTryHandler() {
ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
}
Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
Register scratch,
@ -841,9 +834,10 @@ void MacroAssembler::AllocateTwoByteString(Register result,
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
mov(scratch1, length);
ASSERT(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
shl(scratch1, 1);
add(Operand(scratch1), Immediate(kObjectAlignmentMask));
and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
// Allocate two byte string in new space.
@ -1022,37 +1016,17 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub) {
ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
call(stub->GetCode(), RelocInfo::CODE_TARGET);
}
Object* MacroAssembler::TryCallStub(CodeStub* stub) {
ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
Object* result = stub->TryGetCode();
if (!result->IsFailure()) {
call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
}
return result;
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
}
Object* MacroAssembler::TryTailCallStub(CodeStub* stub) {
ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
Object* result = stub->TryGetCode();
if (!result->IsFailure()) {
jmp(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
}
return result;
}
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
@ -1357,18 +1331,6 @@ void MacroAssembler::Ret() {
}
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
add(Operand(esp), Immediate(stack_elements * kPointerSize));
}
}
void MacroAssembler::Move(Register dst, Handle<Object> value) {
mov(dst, value);
}
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));

22
deps/v8/src/ia32/macro-assembler-ia32.h

@ -149,8 +149,6 @@ class MacroAssembler: public Assembler {
// address must be pushed before calling this helper.
void PushTryHandler(CodeLocation try_location, HandlerType type);
// Unlink the stack handler on top of the stack from the try handler chain.
void PopTryHandler();
// ---------------------------------------------------------------------------
// Inline caching support
@ -287,22 +285,12 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Runtime calls
// Call a code stub. Generate the code if necessary.
// Call a code stub.
void CallStub(CodeStub* stub);
// Call a code stub and return the code object called. Try to generate
// the code if necessary. Do not perform a GC but instead return a retry
// after GC failure.
Object* TryCallStub(CodeStub* stub);
// Tail call a code stub (jump). Generate the code if necessary.
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
// Tail call a code stub (jump) and return the code object called. Try to
// generate the code if necessary. Do not perform a GC but instead return
// a retry after GC failure.
Object* TryTailCallStub(CodeStub* stub);
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
@ -335,12 +323,6 @@ class MacroAssembler: public Assembler {
void Ret();
void Drop(int element_count);
void Call(Label* target) { call(target); }
void Move(Register target, Handle<Object> value);
struct Unresolved {
int pc;
uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.

143
deps/v8/src/ia32/stub-cache-ia32.cc

@ -152,10 +152,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
}
template <typename Pushable>
static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
Pushable name,
JSObject* holder_obj) {
__ push(receiver);
__ push(holder);
@ -284,10 +285,11 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
}
template <class Pushable>
static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
Register name,
Pushable name,
JSObject* holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
@ -493,8 +495,8 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(const ParameterCount& arguments, Register name)
: arguments_(arguments), argc_(arguments.immediate()), name_(name) {}
explicit CallInterceptorCompiler(const ParameterCount& arguments)
: arguments_(arguments), argc_(arguments.immediate()) {}
void CompileCacheable(MacroAssembler* masm,
StubCompiler* stub_compiler,
@ -525,17 +527,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
}
__ EnterInternalFrame();
__ push(holder); // Save the holder.
__ push(name_); // Save the name.
__ push(holder); // save the holder
CompileCallLoadPropertyWithInterceptor(masm,
receiver,
holder,
name_,
holder_obj);
CompileCallLoadPropertyWithInterceptor(
masm,
receiver,
holder,
// Under EnterInternalFrame this refers to name.
Operand(ebp, (argc_ + 3) * kPointerSize),
holder_obj);
__ pop(name_); // Restore the name.
__ pop(receiver); // Restore the holder.
__ pop(receiver); // restore holder
__ LeaveInternalFrame();
__ cmp(eax, Factory::no_interceptor_result_sentinel());
@ -575,13 +577,11 @@ class CallInterceptorCompiler BASE_EMBEDDED {
JSObject* holder_obj,
Label* miss_label) {
__ EnterInternalFrame();
// Save the name_ register across the call.
__ push(name_);
PushInterceptorArguments(masm,
receiver,
holder,
name_,
Operand(ebp, (argc_ + 3) * kPointerSize),
holder_obj);
ExternalReference ref = ExternalReference(
@ -592,15 +592,12 @@ class CallInterceptorCompiler BASE_EMBEDDED {
CEntryStub stub(1);
__ CallStub(&stub);
// Restore the name_ register.
__ pop(name_);
__ LeaveInternalFrame();
}
private:
const ParameterCount& arguments_;
int argc_;
Register name_;
};
@ -757,7 +754,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
}
bool StubCompiler::GenerateLoadCallback(JSObject* object,
void StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@ -765,8 +762,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
Register scratch2,
AccessorInfo* callback,
String* name,
Label* miss,
Failure** failure) {
Label* miss) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
@ -802,14 +798,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ApiGetterEntryStub stub(callback_handle, &fun);
// Calling the stub may try to allocate (if the code is not already
// generated). Do not allow the call to perform a garbage
// collection but instead return the allocation failure object.
Object* result = masm()->TryCallStub(&stub);
if (result->IsFailure()) {
*failure = Failure::cast(result);
return false;
}
__ CallStub(&stub);
// We need to avoid using eax since that now holds the result.
Register tmp = other.is(eax) ? reg : other;
@ -817,7 +806,6 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
__ LeaveInternalFrame();
__ ret(0);
return true;
}
@ -897,11 +885,6 @@ Object* CallStubCompiler::CompileCallField(Object* object,
int index,
String* name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss;
@ -916,7 +899,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
// Do the right check and compute the holder register.
Register reg =
CheckPrototypes(JSObject::cast(object), edx, holder,
ebx, eax, name, &miss);
ebx, ecx, name, &miss);
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
@ -952,11 +935,6 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
String* name,
CheckType check) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss;
@ -978,7 +956,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case RECEIVER_MAP_CHECK:
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), edx, holder,
ebx, eax, name, &miss);
ebx, ecx, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@ -990,15 +968,15 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case STRING_CHECK:
// Check that the object is a two-byte string or a symbol.
__ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
__ cmp(eax, FIRST_NONSTRING_TYPE);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_NONSTRING_TYPE);
__ j(above_equal, &miss, not_taken);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ecx);
CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
ebx, edx, name, &miss);
break;
@ -1007,14 +985,14 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the object is a smi or a heap number.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &fast, taken);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
__ j(not_equal, &miss, not_taken);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::NUMBER_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ecx);
CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
ebx, edx, name, &miss);
break;
}
@ -1030,15 +1008,15 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(),
Context::BOOLEAN_FUNCTION_INDEX,
eax);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ecx);
CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
ebx, edx, name, &miss);
break;
}
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
CheckPrototypes(JSObject::cast(object), edx, holder,
ebx, eax, name, &miss);
ebx, ecx, name, &miss);
// Make sure object->HasFastElements().
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
@ -1081,11 +1059,6 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss;
@ -1098,7 +1071,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
// Get the receiver from the stack.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
CallInterceptorCompiler compiler(arguments(), ecx);
CallInterceptorCompiler compiler(arguments());
CompileLoadInterceptor(&compiler,
this,
masm(),
@ -1108,7 +1081,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
&lookup,
edx,
ebx,
edi,
ecx,
&miss);
// Restore receiver.
@ -1147,11 +1120,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
JSFunction* function,
String* name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss;
@ -1170,32 +1138,15 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
}
// Check that the maps haven't changed.
CheckPrototypes(object, edx, holder, ebx, eax, name, &miss);
CheckPrototypes(object, edx, holder, ebx, ecx, name, &miss);
// Get the value from the cell.
__ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
__ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
if (Heap::InNewSpace(function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
__ test(edi, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, &miss, not_taken);
// Check the shared function info. Make sure it hasn't changed.
__ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
Immediate(Handle<SharedFunctionInfo>(function->shared())));
__ j(not_equal, &miss, not_taken);
} else {
__ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
__ j(not_equal, &miss, not_taken);
}
__ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
__ j(not_equal, &miss, not_taken);
// Patch the receiver on the stack with the global proxy.
if (object->IsGlobalObject()) {
@ -1469,10 +1420,10 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
}
Object* LoadStubCompiler::CompileLoadCallback(String* name,
JSObject* object,
Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
JSObject* holder,
AccessorInfo* callback) {
AccessorInfo* callback,
String* name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@ -1481,11 +1432,8 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
Label miss;
__ mov(eax, Operand(esp, kPointerSize));
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
callback, name, &miss, &failure);
if (!success) return failure;
GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
callback, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@ -1649,11 +1597,8 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
callback, name, &miss, &failure);
if (!success) return failure;
GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
callback, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_callback, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

11
deps/v8/src/ia32/virtual-frame-ia32.cc

@ -925,17 +925,14 @@ Result VirtualFrame::CallKeyedStoreIC() {
Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
int arg_count,
int loop_nesting) {
// Function name, arguments, and receiver are on top of the frame.
// The IC expects the name in ecx and the rest on the stack and
// drops them all.
// Arguments, receiver, and function name are on top of the frame.
// The IC expects them on the stack. It does not drop the function
// name slot (but it does drop the rest).
InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = cgen()->ComputeCallInitialize(arg_count, in_loop);
// Spill args, receiver, and function. The call will drop args and
// receiver.
Result name = Pop();
PrepareForCall(arg_count + 1, arg_count + 1); // Arguments + receiver.
name.ToRegister(ecx);
name.Unuse();
PrepareForCall(arg_count + 2, arg_count + 1);
return RawCallCodeObject(ic, mode);
}

6
deps/v8/src/ia32/virtual-frame-ia32.h

@ -341,9 +341,9 @@ class VirtualFrame: public ZoneObject {
// of the frame. Key and receiver are not dropped.
Result CallKeyedStoreIC();
// Call call IC. Function name, arguments, and receiver are found on top
// of the frame and dropped by the call. The argument count does not
// include the receiver.
// Call call IC. Arguments, reciever, and function name are found
// on top of the frame. Function name slot is not dropped. The
// argument count does not include the receiver.
Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
// Allocate and call JS function as constructor. Arguments,

22
deps/v8/src/ic.cc

@ -409,7 +409,7 @@ Object* CallIC::LoadFunction(State state,
if (!lookup.IsValid()) {
// If the object does not have the requested property, check which
// exception we need to throw.
if (IsContextual(object)) {
if (is_contextual()) {
return ReferenceError("not_defined", name);
}
return TypeError("undefined_method", object, name);
@ -428,7 +428,7 @@ Object* CallIC::LoadFunction(State state,
// If the object does not have the requested property, check which
// exception we need to throw.
if (attr == ABSENT) {
if (IsContextual(object)) {
if (is_contextual()) {
return ReferenceError("not_defined", name);
}
return TypeError("undefined_method", object, name);
@ -628,7 +628,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
// If lookup is invalid, check if we need to throw an exception.
if (!lookup.IsValid()) {
if (FLAG_strict || IsContextual(object)) {
if (FLAG_strict || is_contextual()) {
return ReferenceError("not_defined", name);
}
LOG(SuspectReadEvent(*name, *object));
@ -671,7 +671,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
if (result->IsFailure()) return result;
// If the property is not present, check if we need to throw an
// exception.
if (attr == ABSENT && IsContextual(object)) {
if (attr == ABSENT && is_contextual()) {
return ReferenceError("not_defined", name);
}
return result;
@ -843,7 +843,7 @@ Object* KeyedLoadIC::Load(State state,
// If lookup is invalid, check if we need to throw an exception.
if (!lookup.IsValid()) {
if (FLAG_strict || IsContextual(object)) {
if (FLAG_strict || is_contextual()) {
return ReferenceError("not_defined", name);
}
}
@ -859,7 +859,7 @@ Object* KeyedLoadIC::Load(State state,
if (result->IsFailure()) return result;
// If the property is not present, check if we need to throw an
// exception.
if (attr == ABSENT && IsContextual(object)) {
if (attr == ABSENT && is_contextual()) {
return ReferenceError("not_defined", name);
}
return result;
@ -1292,6 +1292,16 @@ Object* CallIC_Miss(Arguments args) {
}
void CallIC::GenerateInitialize(MacroAssembler* masm, int argc) {
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
// Used from ic_<arch>.cc.
Object* LoadIC_Miss(Arguments args) {
NoHandleAllocation na;

24
deps/v8/src/ic.h

@ -33,11 +33,6 @@
namespace v8 {
namespace internal {
// Flag indicating whether an IC stub needs to check that a backing
// store is in dictionary case.
enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
// IC_UTIL_LIST defines all utility functions called from generated
// inline caching code. The argument for the macro, ICU, is the function name.
#define IC_UTIL_LIST(ICU) \
@ -104,16 +99,7 @@ class IC {
// Returns if this IC is for contextual (no explicit receiver)
// access to properties.
bool IsContextual(Handle<Object> receiver) {
if (receiver->IsGlobalObject()) {
return SlowIsContextual();
} else {
ASSERT(!SlowIsContextual());
return false;
}
}
bool SlowIsContextual() {
bool is_contextual() {
return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
}
@ -189,14 +175,16 @@ class CallIC: public IC {
// Code generator routines.
static void GenerateInitialize(MacroAssembler* masm, int argc) {
GenerateMiss(masm, argc);
}
static void GenerateInitialize(MacroAssembler* masm, int argc);
static void GenerateMiss(MacroAssembler* masm, int argc);
static void GenerateMegamorphic(MacroAssembler* masm, int argc);
static void GenerateNormal(MacroAssembler* masm, int argc);
private:
static void Generate(MacroAssembler* masm,
int argc,
const ExternalReference& f);
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,

5
deps/v8/src/macro-assembler.h

@ -77,13 +77,8 @@ enum AllocationFlags {
#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#include "assembler.h"
#ifdef V8_ARM_VARIANT_THUMB
#include "arm/assembler-thumb2.h"
#include "arm/assembler-thumb2-inl.h"
#else
#include "arm/assembler-arm.h"
#include "arm/assembler-arm-inl.h"
#endif
#include "code.h" // must be after assembler_*.h
#include "arm/macro-assembler-arm.h"
#else

74
deps/v8/src/mark-compact.cc

@ -155,8 +155,6 @@ void MarkCompactCollector::Finish() {
// objects (empty string, illegal builtin).
StubCache::Clear();
ExternalStringTable::CleanUp();
// If we've just compacted old space there's no reason to check the
// fragmentation limit. Just return.
if (HasCompacted()) return;
@ -371,18 +369,41 @@ class RootMarkingVisitor : public ObjectVisitor {
class SymbolTableCleaner : public ObjectVisitor {
public:
SymbolTableCleaner() : pointers_removed_(0) { }
virtual void VisitPointers(Object** start, Object** end) {
void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
// Check if the symbol being pruned is an external symbol. We need to
// delete the associated external data as this symbol is going away.
// Since the object is not marked we can access its map word safely
// without having to worry about marking bits in the object header.
Map* map = HeapObject::cast(*p)->map();
// Since no objects have yet been moved we can safely access the map of
// the object.
if ((*p)->IsExternalString()) {
Heap::FinalizeExternalString(String::cast(*p));
uint32_t type = map->instance_type();
bool is_external = (type & kStringRepresentationMask) ==
kExternalStringTag;
if (is_external) {
bool is_two_byte = (type & kStringEncodingMask) == kTwoByteStringTag;
byte* resource_addr = reinterpret_cast<byte*>(*p) +
ExternalString::kResourceOffset -
kHeapObjectTag;
if (is_two_byte) {
v8::String::ExternalStringResource** resource =
reinterpret_cast<v8::String::ExternalStringResource**>
(resource_addr);
delete *resource;
// Clear the resource pointer in the symbol.
*resource = NULL;
} else {
v8::String::ExternalAsciiStringResource** resource =
reinterpret_cast<v8::String::ExternalAsciiStringResource**>
(resource_addr);
delete *resource;
// Clear the resource pointer in the symbol.
*resource = NULL;
}
}
// Set the entry to null_value (as deleted).
*p = Heap::raw_unchecked_null_value();
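Editor's note: the restored cleaner above locates the external-string resource field by hand because heap pointers are tagged; the raw field address is the tagged pointer plus the field offset minus the tag. A small self-contained sketch of that address computation (kHeapObjectTag is 1; the offset value here is purely illustrative, not the real ExternalString::kResourceOffset):

#include <cstdint>

const intptr_t kHeapObjectTagSketch = 1;   // heap pointers carry tag bit 1
const int kResourceOffsetSketch = 12;      // illustrative field offset

// FieldOperand-style arithmetic done explicitly, as in the cleaner above.
inline uint8_t* ResourceFieldAddress(uintptr_t tagged_object_pointer) {
  return reinterpret_cast<uint8_t*>(tagged_object_pointer)
         + kResourceOffsetSketch            // step to the resource field
         - kHeapObjectTagSketch;            // strip the heap-object tag
}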
@ -525,7 +546,34 @@ bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
}
class SymbolMarkingVisitor : public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
MarkingVisitor marker;
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* object = HeapObject::cast(*p);
// If the object is marked, we have marked or are in the process
// of marking subparts.
if (object->IsMarked()) continue;
// The object is unmarked, we do not need to unmark to use its
// map.
Map* map = object->map();
object->IterateBody(map->instance_type(),
object->SizeFromMap(map),
&marker);
}
}
};
void MarkCompactCollector::MarkSymbolTable() {
// Objects reachable from symbols are marked as live so as to ensure
// that if the symbol itself remains alive after GC for any reason,
// and if it is a cons string backed by an external string (even indirectly),
// then the external string does not receive a weak reference callback.
SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
// Mark the symbol table itself.
SetMark(symbol_table);
@ -533,6 +581,11 @@ void MarkCompactCollector::MarkSymbolTable() {
MarkingVisitor marker;
symbol_table->IteratePrefix(&marker);
ProcessMarkingStack(&marker);
// Mark subparts of the symbols but not the symbols themselves
// (unless reachable from another symbol).
SymbolMarkingVisitor symbol_marker;
symbol_table->IterateElements(&symbol_marker);
ProcessMarkingStack(&marker);
}
@ -721,8 +774,6 @@ void MarkCompactCollector::MarkLiveObjects() {
SymbolTableCleaner v;
symbol_table->IterateElements(&v);
symbol_table->ElementsRemoved(v.PointersRemoved());
ExternalStringTable::Iterate(&v);
ExternalStringTable::CleanUp();
// Remove object groups after marking phase.
GlobalHandles::RemoveObjectGroups();
@ -836,8 +887,11 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// space are encoded in their map pointer word (along with an encoding of
// their map pointers).
//
// The exact encoding is described in the comments for class MapWord in
// objects.h.
// 31 21 20 10 9 0
// +-----------------+------------------+-----------------+
// |forwarding offset|page offset of map|page index of map|
// +-----------------+------------------+-----------------+
// 11 bits 11 bits 10 bits
//
// An address range [start, end) can have both live and non-live objects.
// Maximal non-live regions are marked so they can be skipped on subsequent
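Editor's note: the restored encoding packs three fields into one 32-bit map word, as the diagram above shows (10 bits of map page index, 11 bits of map page offset, 11 bits of forwarding offset). A sketch of the pack/unpack arithmetic using those widths directly rather than the real MapWord constants; no range checking is done here:

#include <cstdint>

// |forwarding offset : 11|map page offset : 11|map page index : 10|
inline uint32_t EncodeMapWord(uint32_t page_index, uint32_t page_offset,
                              uint32_t forwarding_offset) {
  return (forwarding_offset << 21) | (page_offset << 10) | page_index;
}
inline uint32_t MapPageIndex(uint32_t w)      { return w & 0x3FF; }          // bits 0..9
inline uint32_t MapPageOffset(uint32_t w)     { return (w >> 10) & 0x7FF; }  // bits 10..20
inline uint32_t ForwardingOffset(uint32_t w)  { return w >> 21; }            // bits 21..31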

35
deps/v8/src/math.js

@ -29,6 +29,7 @@
// Keep reference to original values of some global properties. This
// has the added benefit that the code in this file is isolated from
// changes to these properties.
const $Infinity = global.Infinity;
const $floor = MathFloor;
const $random = MathRandom;
const $abs = MathAbs;
@ -117,40 +118,26 @@ function MathLog(x) {
// ECMA 262 - 15.8.2.11
function MathMax(arg1, arg2) { // length == 2
var r = -$Infinity;
var length = %_ArgumentsLength();
if (length == 0) {
return -1/0; // Compiler constant-folds this to -Infinity.
}
var r = arg1;
if (!IS_NUMBER(r)) r = ToNumber(r);
if (NUMBER_IS_NAN(r)) return r;
for (var i = 1; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = ToNumber(n);
for (var i = 0; i < length; i++) {
var n = ToNumber(%_Arguments(i));
if (NUMBER_IS_NAN(n)) return n;
// Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be
// a Smi or heap number.
if (n > r || (r === 0 && n === 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
// Make sure +0 is considered greater than -0.
if (n > r || (r === 0 && n === 0 && !%_IsSmi(r))) r = n;
}
return r;
}
// ECMA 262 - 15.8.2.12
function MathMin(arg1, arg2) { // length == 2
var r = $Infinity;
var length = %_ArgumentsLength();
if (length == 0) {
return 1/0; // Compiler constant-folds this to Infinity.
}
var r = arg1;
if (!IS_NUMBER(r)) r = ToNumber(r);
if (NUMBER_IS_NAN(r)) return r;
for (var i = 1; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = ToNumber(n);
for (var i = 0; i < length; i++) {
var n = ToNumber(%_Arguments(i));
if (NUMBER_IS_NAN(n)) return n;
// Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be a
// Smi or a heap number.
if (n < r || (r === 0 && n === 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
// Make sure -0 is considered less than +0.
if (n < r || (r === 0 && n === 0 && !%_IsSmi(n))) r = n;
}
return r;
}
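Editor's note: the `1 / r < 0` test in the version being removed works because IEEE-754 keeps the sign of zero, so dividing by -0 yields -Infinity. A minimal C++ illustration of the same trick (not V8 code; it assumes the IEEE-754 semantics V8's target platforms provide):

#include <cassert>

int main() {
  double plus_zero = 0.0;
  double minus_zero = -0.0;
  assert(plus_zero == minus_zero);   // equality cannot tell them apart
  assert(1.0 / plus_zero > 0.0);     // +Infinity
  assert(1.0 / minus_zero < 0.0);    // -Infinity, so `1 / x < 0` spots -0
  return 0;
}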

5
deps/v8/src/messages.js

@ -157,11 +157,6 @@ function FormatMessage(message) {
instanceof_nonobject_proto: "Function has non-object prototype '%0' in instanceof check",
null_to_object: "Cannot convert null to object",
reduce_no_initial: "Reduce of empty array with no initial value",
getter_must_be_callable: "Getter must be a function: %0",
setter_must_be_callable: "Setter must be a function: %0",
value_and_accessor: "Invalid property. A property cannot both have accessors and be writable or have a value: %0",
proto_object_or_null: "Object prototype may only be an Object or null",
property_desc_object: "Property description must be an object: %0",
// RangeError
invalid_array_length: "Invalid array length",
stack_overflow: "Maximum call stack size exceeded",

12
deps/v8/src/objects-inl.h

@ -952,14 +952,14 @@ MapWord MapWord::EncodeAddress(Address map_address, int offset) {
// exceed the object area size of a page.
ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
uintptr_t compact_offset = offset >> kObjectAlignmentBits;
int compact_offset = offset >> kObjectAlignmentBits;
ASSERT(compact_offset < (1 << kForwardingOffsetBits));
Page* map_page = Page::FromAddress(map_address);
ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
uintptr_t map_page_offset =
map_page->Offset(map_address) >> kMapAlignmentBits;
int map_page_offset =
map_page->Offset(map_address) >> kObjectAlignmentBits;
uintptr_t encoding =
(compact_offset << kForwardingOffsetShift) |
@ -975,8 +975,8 @@ Address MapWord::DecodeMapAddress(MapSpace* map_space) {
ASSERT_MAP_PAGE_INDEX(map_page_index);
int map_page_offset = static_cast<int>(
((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift) <<
kMapAlignmentBits);
((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift)
<< kObjectAlignmentBits);
return (map_space->PageAddress(map_page_index) + map_page_offset);
}
@ -1499,7 +1499,7 @@ void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
// Make sure none of the elements in desc are in new space.
// Make sure non of the elements in desc are in new space.
ASSERT(!Heap::InNewSpace(desc->GetKey()));
ASSERT(!Heap::InNewSpace(desc->GetValue()));

14
deps/v8/src/objects.cc

@ -1351,8 +1351,6 @@ Object* JSObject::AddFastProperty(String* name,
Object* JSObject::AddConstantFunctionProperty(String* name,
JSFunction* function,
PropertyAttributes attributes) {
ASSERT(!Heap::InNewSpace(function));
// Allocate new instance descriptors with (name, function) added
ConstantFunctionDescriptor d(name, function, attributes);
Object* new_descriptors =
@ -1439,7 +1437,7 @@ Object* JSObject::AddProperty(String* name,
// Ensure the descriptor array does not get too big.
if (map()->instance_descriptors()->number_of_descriptors() <
DescriptorArray::kMaxNumberOfDescriptors) {
if (value->IsJSFunction() && !Heap::InNewSpace(value)) {
if (value->IsJSFunction()) {
return AddConstantFunctionProperty(name,
JSFunction::cast(value),
attributes);
@ -3256,8 +3254,7 @@ Object* DescriptorArray::Allocate(int number_of_descriptors) {
return Heap::empty_descriptor_array();
}
// Allocate the array of keys.
Object* array =
Heap::AllocateFixedArray(ToKeyIndex(number_of_descriptors));
Object* array = Heap::AllocateFixedArray(ToKeyIndex(number_of_descriptors));
if (array->IsFailure()) return array;
// Do not use DescriptorArray::cast on incomplete object.
FixedArray* result = FixedArray::cast(array);
@ -7965,10 +7962,7 @@ Object* StringDictionary::TransformPropertiesToFastFor(
PropertyType type = DetailsAt(i).type();
ASSERT(type != FIELD);
instance_descriptor_length++;
if (type == NORMAL &&
(!value->IsJSFunction() || Heap::InNewSpace(value))) {
number_of_fields += 1;
}
if (type == NORMAL && !value->IsJSFunction()) number_of_fields += 1;
}
}
@ -7999,7 +7993,7 @@ Object* StringDictionary::TransformPropertiesToFastFor(
PropertyDetails details = DetailsAt(i);
PropertyType type = details.type();
if (value->IsJSFunction() && !Heap::InNewSpace(value)) {
if (value->IsJSFunction()) {
ConstantFunctionDescriptor d(String::cast(key),
JSFunction::cast(value),
details.attributes(),

47
deps/v8/src/objects.h

@ -892,25 +892,15 @@ class MapWord BASE_EMBEDDED {
static const int kOverflowBit = 1; // overflow bit
static const int kOverflowMask = (1 << kOverflowBit); // overflow mask
// Forwarding pointers and map pointer encoding. On 32 bit all the bits are
// used.
// Forwarding pointers and map pointer encoding
// 31 21 20 10 9 0
// +-----------------+------------------+-----------------+
// |forwarding offset|page offset of map|page index of map|
// +-----------------+------------------+-----------------+
// ^ ^ ^
// | | |
// | | kMapPageIndexBits
// | kMapPageOffsetBits
// kForwardingOffsetBits
static const int kMapPageOffsetBits = kPageSizeBits - kMapAlignmentBits;
static const int kForwardingOffsetBits = kPageSizeBits - kObjectAlignmentBits;
#ifdef V8_HOST_ARCH_64_BIT
static const int kMapPageIndexBits = 16;
#else
// Use all 32 bits for the encoding on a 32-bit platform.
static const int kMapPageIndexBits =
32 - (kMapPageOffsetBits + kForwardingOffsetBits);
#endif
// 11 bits 11 bits 10 bits
static const int kMapPageIndexBits = 10;
static const int kMapPageOffsetBits = 11;
static const int kForwardingOffsetBits = 11;
static const int kMapPageIndexShift = 0;
static const int kMapPageOffsetShift =
@ -918,12 +908,16 @@ class MapWord BASE_EMBEDDED {
static const int kForwardingOffsetShift =
kMapPageOffsetShift + kMapPageOffsetBits;
// Bit masks covering the different parts the encoding.
static const uintptr_t kMapPageIndexMask =
// 0x000003FF
static const uint32_t kMapPageIndexMask =
(1 << kMapPageOffsetShift) - 1;
static const uintptr_t kMapPageOffsetMask =
// 0x001FFC00
static const uint32_t kMapPageOffsetMask =
((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
static const uintptr_t kForwardingOffsetMask =
// 0xFFE00000
static const uint32_t kForwardingOffsetMask =
~(kMapPageIndexMask | kMapPageOffsetMask);
private:
@ -1668,7 +1662,6 @@ class DescriptorArray: public FixedArray {
public:
// Is this the singleton empty_descriptor_array?
inline bool IsEmpty();
// Returns the number of descriptors in the array.
int number_of_descriptors() {
return IsEmpty() ? 0 : length() - kFirstIndex;
@ -1808,14 +1801,12 @@ class DescriptorArray: public FixedArray {
static int ToKeyIndex(int descriptor_number) {
return descriptor_number+kFirstIndex;
}
static int ToDetailsIndex(int descriptor_number) {
return (descriptor_number << 1) + 1;
}
static int ToValueIndex(int descriptor_number) {
return descriptor_number << 1;
}
static int ToDetailsIndex(int descriptor_number) {
return (descriptor_number << 1) + 1;
}
bool is_null_descriptor(int descriptor_number) {
return PropertyDetails(GetDetails(descriptor_number)).type() ==
@ -2847,6 +2838,7 @@ class Map: public HeapObject {
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, FixedArray)
// Returns a copy of the map.
Object* CopyDropDescriptors();
// Returns a copy of the map, with all transitions dropped from the
@ -2914,8 +2906,7 @@ class Map: public HeapObject {
static const int kInstanceDescriptorsOffset =
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
static const int kPadStart = kCodeCacheOffset + kPointerSize;
static const int kSize = MAP_SIZE_ALIGN(kPadStart);
static const int kSize = kCodeCacheOffset + kPointerSize;
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;

3
deps/v8/src/parser.cc

@ -2657,9 +2657,6 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
Expression* cond = NULL;
if (peek() != Token::SEMICOLON) {
cond = ParseExpression(true, CHECK_OK);
if (cond && cond->AsCompareOperation()) {
cond->AsCompareOperation()->set_is_for_loop_condition();
}
}
Expect(Token::SEMICOLON, CHECK_OK);

10
deps/v8/src/prettyprinter.cc

@ -594,11 +594,11 @@ class IndentedScope BASE_EMBEDDED {
ast_printer_->inc_indent();
}
explicit IndentedScope(const char* txt, StaticType* type = NULL) {
explicit IndentedScope(const char* txt, SmiAnalysis* type = NULL) {
ast_printer_->PrintIndented(txt);
if ((type != NULL) && (type->IsKnown())) {
ast_printer_->Print(" (type = ");
ast_printer_->Print(StaticType::Type2String(type));
ast_printer_->Print(SmiAnalysis::Type2String(type));
ast_printer_->Print(")");
}
ast_printer_->Print("\n");
@ -657,7 +657,7 @@ void AstPrinter::PrintLiteralIndented(const char* info,
void AstPrinter::PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value,
StaticType* type) {
SmiAnalysis* type) {
if (var == NULL) {
PrintLiteralIndented(info, value, true);
} else {
@ -665,7 +665,7 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
if (type->IsKnown()) {
OS::SNPrintF(buf, "%s (mode = %s, type = %s)", info,
Variable::Mode2String(var->mode()),
StaticType::Type2String(type));
SmiAnalysis::Type2String(type));
} else {
OS::SNPrintF(buf, "%s (mode = %s)", info,
Variable::Mode2String(var->mode()));
@ -1072,7 +1072,7 @@ void AstPrinter::VisitCountOperation(CountOperation* node) {
OS::SNPrintF(buf, "%s %s (type = %s)",
(node->is_prefix() ? "PRE" : "POST"),
Token::Name(node->op()),
StaticType::Type2String(node->type()));
SmiAnalysis::Type2String(node->type()));
} else {
OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
Token::Name(node->op()));

2
deps/v8/src/prettyprinter.h

@ -102,7 +102,7 @@ class AstPrinter: public PrettyPrinter {
void PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value,
StaticType* type);
SmiAnalysis* type);
void PrintLabelsIndented(const char* info, ZoneStringList* labels);
void inc_indent() { indent_++; }

2
deps/v8/src/rewriter.cc

@ -367,7 +367,7 @@ void AstOptimizer::VisitAssignment(Assignment* node) {
if (proxy != NULL) {
Variable* var = proxy->AsVariable();
if (var != NULL) {
StaticType* var_type = var->type();
SmiAnalysis* var_type = var->type();
if (var_type->IsUnknown()) {
var_type->CopyFrom(node->type());
} else if (var_type->IsLikelySmi()) {

105
deps/v8/src/runtime.cc

@ -398,82 +398,6 @@ static Object* Runtime_CreateArrayLiteralBoilerplate(Arguments args) {
}
static Object* Runtime_CreateObjectLiteral(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_CHECKED(literals_index, args[1]);
CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
// Check if boilerplate exists. If not, create it first.
Handle<Object> boilerplate(literals->get(literals_index));
if (*boilerplate == Heap::undefined_value()) {
boilerplate = CreateObjectLiteralBoilerplate(literals, constant_properties);
if (boilerplate.is_null()) return Failure::Exception();
// Update the functions literal and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
return DeepCopyBoilerplate(JSObject::cast(*boilerplate));
}
static Object* Runtime_CreateObjectLiteralShallow(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_CHECKED(literals_index, args[1]);
CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
// Check if boilerplate exists. If not, create it first.
Handle<Object> boilerplate(literals->get(literals_index));
if (*boilerplate == Heap::undefined_value()) {
boilerplate = CreateObjectLiteralBoilerplate(literals, constant_properties);
if (boilerplate.is_null()) return Failure::Exception();
// Update the functions literal and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
return Heap::CopyJSObject(JSObject::cast(*boilerplate));
}
static Object* Runtime_CreateArrayLiteral(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_CHECKED(literals_index, args[1]);
CONVERT_ARG_CHECKED(FixedArray, elements, 2);
// Check if boilerplate exists. If not, create it first.
Handle<Object> boilerplate(literals->get(literals_index));
if (*boilerplate == Heap::undefined_value()) {
boilerplate = CreateArrayLiteralBoilerplate(literals, elements);
if (boilerplate.is_null()) return Failure::Exception();
// Update the functions literal and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
return DeepCopyBoilerplate(JSObject::cast(*boilerplate));
}
static Object* Runtime_CreateArrayLiteralShallow(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(FixedArray, literals, 0);
CONVERT_SMI_CHECKED(literals_index, args[1]);
CONVERT_ARG_CHECKED(FixedArray, elements, 2);
// Check if boilerplate exists. If not, create it first.
Handle<Object> boilerplate(literals->get(literals_index));
if (*boilerplate == Heap::undefined_value()) {
boilerplate = CreateArrayLiteralBoilerplate(literals, elements);
if (boilerplate.is_null()) return Failure::Exception();
// Update the functions literal and return the boilerplate.
literals->set(literals_index, *boilerplate);
}
return Heap::CopyJSObject(JSObject::cast(*boilerplate));
}
static Object* Runtime_CreateCatchExtensionObject(Arguments args) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, key, args[0]);
@ -720,7 +644,7 @@ static Object* Runtime_DeclareGlobals(Arguments args) {
// Copy the function and update its context. Use it as value.
Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(value);
Handle<JSFunction> function =
Factory::NewFunctionFromBoilerplate(boilerplate, context, TENURED);
Factory::NewFunctionFromBoilerplate(boilerplate, context);
value = function;
}
@ -795,15 +719,12 @@ static Object* Runtime_DeclareContextSlot(Arguments args) {
if (*initial_value != NULL) {
if (index >= 0) {
// The variable or constant context slot should always be in
// the function context or the arguments object.
if (holder->IsContext()) {
ASSERT(holder.is_identical_to(context));
if (((attributes & READ_ONLY) == 0) ||
context->get(index)->IsTheHole()) {
context->set(index, *initial_value);
}
} else {
Handle<JSObject>::cast(holder)->SetElement(index, *initial_value);
// the function context; not in any outer context nor in the
// arguments object.
ASSERT(holder.is_identical_to(context));
if (((attributes & READ_ONLY) == 0) ||
context->get(index)->IsTheHole()) {
context->set(index, *initial_value);
}
} else {
// Slow case: The property is not in the FixedArray part of the context.
@ -4502,11 +4423,8 @@ static Object* Runtime_NewClosure(Arguments args) {
CONVERT_ARG_CHECKED(Context, context, 0);
CONVERT_ARG_CHECKED(JSFunction, boilerplate, 1);
PretenureFlag pretenure = (context->global_context() == *context)
? TENURED // Allocate global closures in old space.
: NOT_TENURED; // Allocate local closures in new space.
Handle<JSFunction> result =
Factory::NewFunctionFromBoilerplate(boilerplate, context, pretenure);
Factory::NewFunctionFromBoilerplate(boilerplate, context);
return *result;
}
@ -5222,7 +5140,7 @@ static Object* Runtime_CompileString(Arguments args) {
validate);
if (boilerplate.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
Factory::NewFunctionFromBoilerplate(boilerplate, context, NOT_TENURED);
Factory::NewFunctionFromBoilerplate(boilerplate, context);
return *fun;
}
@ -5250,7 +5168,7 @@ static Object* CompileDirectEval(Handle<String> source) {
Compiler::DONT_VALIDATE_JSON);
if (boilerplate.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
Factory::NewFunctionFromBoilerplate(boilerplate, context, NOT_TENURED);
Factory::NewFunctionFromBoilerplate(boilerplate, context);
return *fun;
}
@ -7887,8 +7805,7 @@ static Object* Runtime_CollectStackTrace(Arguments args) {
HandleScope scope;
limit = Max(limit, 0); // Ensure that limit is not negative.
int initial_size = Min(limit, 10);
int initial_size = limit < 10 ? limit : 10;
Handle<JSArray> result = Factory::NewJSArray(initial_size * 3);
StackFrameIterator iter;
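The pretenure decision removed from Runtime_NewClosure above is the one piece of logic in this runtime.cc hunk worth restating; a minimal C++ sketch of the same rule, using illustrative stand-in types rather than the real V8 declarations:

enum PretenureFlag { NOT_TENURED, TENURED };

// Mirrors the removed branch: closures created in the global context tend to
// be long-lived, so they go straight to old space; local closures start in
// new space and are promoted only if they survive scavenges.
PretenureFlag ClosurePretenure(bool context_is_global_context) {
  return context_is_global_context ? TENURED : NOT_TENURED;
}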

4
deps/v8/src/runtime.h

@ -223,10 +223,6 @@ namespace internal {
F(CreateObjectLiteralBoilerplate, 3, 1) \
F(CloneLiteralBoilerplate, 1, 1) \
F(CloneShallowLiteralBoilerplate, 1, 1) \
F(CreateObjectLiteral, 3, 1) \
F(CreateObjectLiteralShallow, 3, 1) \
F(CreateArrayLiteral, 3, 1) \
F(CreateArrayLiteralShallow, 3, 1) \
\
/* Catch context extension objects */ \
F(CreateCatchExtensionObject, 2, 1) \

6
deps/v8/src/runtime.js

@ -122,12 +122,6 @@ function COMPARE(x, ncr) {
return %StringCompare(this, x);
}
// If one of the operands is undefined, it will convert to NaN and
// thus the result should be as if one of the operands was NaN.
if (IS_UNDEFINED(this) || IS_UNDEFINED(x)) {
return ncr;
}
// Default implementation.
var a = %ToPrimitive(this, NUMBER_HINT);
var b = %ToPrimitive(x, NUMBER_HINT);

3
deps/v8/src/scopes.cc

@ -189,7 +189,8 @@ void Scope::Initialize(bool inside_with) {
variables_.Declare(this, Factory::this_symbol(), Variable::VAR,
false, Variable::THIS);
var->rewrite_ = new Slot(var, Slot::PARAMETER, -1);
receiver_ = var;
receiver_ = new VariableProxy(Factory::this_symbol(), true, false);
receiver_->BindTo(var);
if (is_function_scope()) {
// Declare 'arguments' variable which exists in all functions.

11
deps/v8/src/scopes.h

@ -206,13 +206,8 @@ class Scope: public ZoneObject {
// ---------------------------------------------------------------------------
// Accessors.
// A new variable proxy corresponding to the (function) receiver.
VariableProxy* receiver() const {
VariableProxy* proxy =
new VariableProxy(Factory::this_symbol(), true, false);
proxy->BindTo(receiver_);
return proxy;
}
// The variable corresponding to the (function) receiver.
VariableProxy* receiver() const { return receiver_; }
// The variable holding the function literal for named function
// literals, or NULL.
@ -319,7 +314,7 @@ class Scope: public ZoneObject {
// Declarations.
ZoneList<Declaration*> decls_;
// Convenience variable.
Variable* receiver_;
VariableProxy* receiver_;
// Function variable, if any; function scopes only.
Variable* function_;
// Convenience variable; function scopes only.
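The shape change in this scopes.h hunk is easier to see side by side: the 2.0.5 code kept the receiver as a Variable and allocated a fresh VariableProxy on every receiver() call, while the reverted 2.0.3 code builds one proxy in Scope::Initialize() (see the scopes.cc hunk above) and simply returns it. A simplified C++ sketch of the two variants, with stand-in types rather than the real V8 classes:

struct Variable {};
struct VariableProxy {
  explicit VariableProxy(Variable* v) : var(v) {}
  Variable* var;
};

// 2.0.5 shape (removed): allocate a new proxy for each caller.
struct ScopeWithVariableReceiver {
  Variable* receiver_;
  VariableProxy* receiver() const { return new VariableProxy(receiver_); }
};

// 2.0.3 shape (restored): return the proxy created once during Initialize().
struct ScopeWithProxyReceiver {
  VariableProxy* receiver_;
  VariableProxy* receiver() const { return receiver_; }
};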

22
deps/v8/src/serialize.cc

@ -55,8 +55,9 @@ class SerializationAddressMapper {
static int MappedTo(HeapObject* obj) {
ASSERT(IsMapped(obj));
return static_cast<int>(reinterpret_cast<intptr_t>(
serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
return reinterpret_cast<intptr_t>(serialization_map_->Lookup(Key(obj),
Hash(obj),
false)->value);
}
static void Map(HeapObject* obj, int to) {
@ -80,7 +81,7 @@ class SerializationAddressMapper {
}
static uint32_t Hash(HeapObject* obj) {
return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
return reinterpret_cast<intptr_t>(obj->address());
}
static void* Key(HeapObject* obj) {
@ -484,15 +485,6 @@ void ExternalReferenceTable::PopulateTable() {
21,
"NativeRegExpMacroAssembler::GrowStack()");
#endif
// Keyed lookup cache.
Add(ExternalReference::keyed_lookup_cache_keys().address(),
UNCLASSIFIED,
22,
"KeyedLookupCache::keys()");
Add(ExternalReference::keyed_lookup_cache_field_offsets().address(),
UNCLASSIFIED,
23,
"KeyedLookupCache::field_offsets()");
}
@ -632,7 +624,7 @@ HeapObject* Deserializer::GetAddressFromStart(int space) {
return HeapObject::FromAddress(pages_[space][0] + offset);
}
ASSERT(SpaceIsPaged(space));
int page_of_pointee = offset >> kPageSizeBits;
int page_of_pointee = offset >> Page::kPageSizeBits;
Address object_address = pages_[space][page_of_pointee] +
(offset & Page::kPageAlignmentMask);
return HeapObject::FromAddress(object_address);
@ -972,8 +964,8 @@ void Serializer::SerializeObject(
int offset = CurrentAllocationAddress(space) - address;
bool from_start = true;
if (SpaceIsPaged(space)) {
if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
(address >> kPageSizeBits)) {
if ((CurrentAllocationAddress(space) >> Page::kPageSizeBits) ==
(address >> Page::kPageSizeBits)) {
from_start = false;
address = offset;
}
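For context on the GetAddressFromStart hunk in this serialize.cc section: back-references into a paged space are encoded as a single offset, which the deserializer splits into a page index and an in-page offset. A self-contained C++ sketch of that split (the page-array layout is illustrative; kPageSizeBits is 13, matching the spaces.h hunk below):

#include <cstdint>

const int kPageSizeBits = 13;                              // 8K-byte pages
const uintptr_t kPageAlignmentMask = (1u << kPageSizeBits) - 1;

// pages[] holds the start address of every page in the space, in order.
uintptr_t AddressFromStartOffset(const uintptr_t* pages, int offset) {
  int page_of_pointee = offset >> kPageSizeBits;           // which page
  uintptr_t in_page = offset & kPageAlignmentMask;         // offset inside it
  return pages[page_of_pointee] + in_page;
}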

4
deps/v8/src/spaces.cc

@ -398,7 +398,7 @@ static int PagesInChunk(Address start, size_t size) {
// start+size. Page::kPageSize is a power of two so we can divide by
// shifting.
return static_cast<int>((RoundDown(start + size, Page::kPageSize)
- RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
- RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits);
}
@ -412,7 +412,7 @@ Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
if (size_ + static_cast<int>(chunk_size) > capacity_) {
// Request as many pages as we can.
chunk_size = capacity_ - size_;
requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);
requested_pages = static_cast<int>(chunk_size >> Page::kPageSizeBits);
if (requested_pages <= 0) return Page::FromAddress(NULL);
}

27
deps/v8/src/spaces.h

@ -65,23 +65,20 @@ namespace internal {
// Some assertion macros used in the debugging mode.
#define ASSERT_PAGE_ALIGNED(address) \
#define ASSERT_PAGE_ALIGNED(address) \
ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
#define ASSERT_OBJECT_ALIGNED(address) \
#define ASSERT_OBJECT_ALIGNED(address) \
ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
#define ASSERT_MAP_ALIGNED(address) \
ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
#define ASSERT_OBJECT_SIZE(size) \
#define ASSERT_OBJECT_SIZE(size) \
ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
#define ASSERT_PAGE_OFFSET(offset) \
ASSERT((Page::kObjectStartOffset <= offset) \
#define ASSERT_PAGE_OFFSET(offset) \
ASSERT((Page::kObjectStartOffset <= offset) \
&& (offset <= Page::kPageSize))
#define ASSERT_MAP_PAGE_INDEX(index) \
#define ASSERT_MAP_PAGE_INDEX(index) \
ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
@ -109,8 +106,11 @@ class AllocationInfo;
// For this reason we add an offset to get room for the Page data at the start.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
// 8K) in total. Because a map pointer is aligned to the pointer size (4
// bytes), 11 bits are enough to encode the page offset. 21 bits (10 for the
// page index + 11 for the offset in the page) are required to encode a map
// pointer.
//
// The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or
@ -212,6 +212,9 @@ class Page {
static void set_rset_state(RSetState state) { rset_state_ = state; }
#endif
// 8K bytes per page.
static const int kPageSizeBits = 13;
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
@ -511,7 +514,7 @@ class MemoryAllocator : public AllStatic {
#endif
// Due to encoding limitation, we can only have 8K chunks.
static const int kMaxNofChunks = 1 << kPageSizeBits;
static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
// If a chunk has at least 16 pages, the maximum heap size is about
// 8K * 8K * 16 = 1G bytes.
#ifdef V8_TARGET_ARCH_X64
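The restored MapWord comment in this spaces.h hunk rests on a little arithmetic; a small, self-contained C++ program that checks those numbers (the constants match the hunk, the program itself is only illustrative):

#include <cstdio>

int main() {
  const int kPageSizeBits = 13;                  // 8K bytes per page
  const int kPageSize = 1 << kPageSizeBits;      // 8192
  const int kMapSpacePages = 1024;               // maximum pages in the map space
  const int kOffsetBits = kPageSizeBits - 2;     // maps are 4-byte aligned, so 11 bits
  const int kIndexBits = 10;                     // 1024 pages need 10 bits
  std::printf("map space: %d pages * %d bytes = %d bytes (8M)\n",
              kMapSpacePages, kPageSize, kMapSpacePages * kPageSize);
  std::printf("encoded map pointer: %d + %d = %d bits\n",
              kIndexBits, kOffsetBits, kIndexBits + kOffsetBits);
  return 0;
}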

4
deps/v8/src/stub-cache.cc

@ -120,7 +120,7 @@ Object* StubCache::ComputeLoadCallback(String* name,
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback);
code = compiler.CompileLoadCallback(receiver, holder, callback, name);
if (code->IsFailure()) return code;
LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
@ -831,7 +831,7 @@ static Object* ThrowReferenceError(String* name) {
// can't use either LoadIC or KeyedLoadIC constructors.
IC ic(IC::NO_EXTRA_FRAME);
ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
if (!ic.SlowIsContextual()) return Heap::undefined_value();
if (!ic.is_contextual()) return Heap::undefined_value();
// Throw a reference error.
HandleScope scope;

11
deps/v8/src/stub-cache.h

@ -405,7 +405,7 @@ class StubCompiler BASE_EMBEDDED {
String* name,
Label* miss);
bool GenerateLoadCallback(JSObject* object,
void GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@ -413,8 +413,7 @@ class StubCompiler BASE_EMBEDDED {
Register scratch2,
AccessorInfo* callback,
String* name,
Label* miss,
Failure** failure);
Label* miss);
void GenerateLoadConstant(JSObject* object,
JSObject* holder,
@ -448,10 +447,10 @@ class LoadStubCompiler: public StubCompiler {
JSObject* holder,
int index,
String* name);
Object* CompileLoadCallback(String* name,
JSObject* object,
Object* CompileLoadCallback(JSObject* object,
JSObject* holder,
AccessorInfo* callback);
AccessorInfo* callback,
String* name);
Object* CompileLoadConstant(JSObject* object,
JSObject* holder,
Object* value,

2
deps/v8/src/token.cc

@ -32,11 +32,13 @@
namespace v8 {
namespace internal {
#ifdef DEBUG
#define T(name, string, precedence) #name,
const char* Token::name_[NUM_TOKENS] = {
TOKEN_LIST(T, T, IGNORE_TOKEN)
};
#undef T
#endif
#define T(name, string, precedence) string,

9
deps/v8/src/token.h

@ -66,9 +66,8 @@ namespace internal {
T(DEC, "--", 0) \
\
/* Assignment operators. */ \
/* IsAssignmentOp() and Assignment::is_compound() relies on */ \
/* this block of enum values being contiguous and sorted in the */ \
/* same order! */ \
/* IsAssignmentOp() relies on this block of enum values */ \
/* being contiguous and sorted in the same order! */ \
T(INIT_VAR, "=init_var", 2) /* AST-use only. */ \
T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \
T(ASSIGN, "=", 2) \
@ -212,12 +211,14 @@ class Token {
};
#undef T
#ifdef DEBUG
// Returns a string corresponding to the C++ token name
// (e.g. "LT" for the token LT).
static const char* Name(Value tok) {
ASSERT(0 <= tok && tok < NUM_TOKENS);
return name_[tok];
}
#endif
// Predicates
static bool IsAssignmentOp(Value tok) {
@ -260,7 +261,9 @@ class Token {
}
private:
#ifdef DEBUG
static const char* name_[NUM_TOKENS];
#endif
static const char* string_[NUM_TOKENS];
static int8_t precedence_[NUM_TOKENS];
};

2
deps/v8/src/v8-counters.h

@ -74,6 +74,8 @@ namespace internal {
SC(objs_since_last_full, V8.ObjsSinceLastFull) \
SC(symbol_table_capacity, V8.SymbolTableCapacity) \
SC(number_of_symbols, V8.NumberOfSymbols) \
/* Current amount of memory in external string buffers. */ \
SC(total_external_string_memory, V8.TotalExternalStringMemory) \
SC(script_wrappers, V8.ScriptWrappers) \
SC(call_initialize_stubs, V8.CallInitializeStubs) \
SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \

207
deps/v8/src/v8natives.js

@ -41,7 +41,6 @@
const $isNaN = GlobalIsNaN;
const $isFinite = GlobalIsFinite;
// ----------------------------------------------------------------------------
@ -88,7 +87,7 @@ function GlobalIsFinite(number) {
// ECMA-262 - 15.1.2.2
function GlobalParseInt(string, radix) {
if (IS_UNDEFINED(radix)) {
if (radix === void 0) {
// Some people use parseInt instead of Math.floor. This
// optimization makes parseInt on a Smi 12 times faster (60ns
// vs 800ns). The following optimization makes parseInt on a
@ -281,207 +280,6 @@ function ObjectKeys(obj) {
}
// ES5 8.10.1.
function IsAccessorDescriptor(desc) {
if (IS_UNDEFINED(desc)) return false;
return desc.hasGetter_ || desc.hasSetter_;
}
// ES5 8.10.2.
function IsDataDescriptor(desc) {
if (IS_UNDEFINED(desc)) return false;
return desc.hasValue_ || desc.hasWritable_;
}
// ES5 8.10.3.
function IsGenericDescriptor(desc) {
return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
}
function IsInconsistentDescriptor(desc) {
return IsAccessorDescriptor(desc) && IsDataDescriptor(desc);
}
// ES5 8.10.5.
function ToPropertyDescriptor(obj) {
if (!IS_OBJECT(obj)) {
throw MakeTypeError("property_desc_object", [obj]);
}
var desc = new PropertyDescriptor();
if ("enumerable" in obj) {
desc.setEnumerable(ToBoolean(obj.enumerable));
}
if ("configurable" in obj) {
desc.setConfigurable(ToBoolean(obj.configurable));
}
if ("value" in obj) {
desc.setValue(obj.value);
}
if ("writable" in obj) {
desc.setWritable(ToBoolean(obj.writable));
}
if ("get" in obj) {
var get = obj.get;
if (!IS_UNDEFINED(get) && !IS_FUNCTION(get)) {
throw MakeTypeError("getter_must_be_callable", [get]);
}
desc.setGet(get);
}
if ("set" in obj) {
var set = obj.set;
if (!IS_UNDEFINED(set) && !IS_FUNCTION(set)) {
throw MakeTypeError("setter_must_be_callable", [set]);
}
desc.setSet(set);
}
if (IsInconsistentDescriptor(desc)) {
throw MakeTypeError("value_and_accessor", [obj]);
}
return desc;
}
function PropertyDescriptor() {
// Initialize here so they are all in-object and have the same map.
// Default values from ES5 8.6.1.
this.value_ = void 0;
this.hasValue_ = false;
this.writable_ = false;
this.hasWritable_ = false;
this.enumerable_ = false;
this.configurable_ = false;
this.get_ = void 0;
this.hasGetter_ = false;
this.set_ = void 0;
this.hasSetter_ = false;
}
PropertyDescriptor.prototype.setValue = function(value) {
this.value_ = value;
this.hasValue_ = true;
}
PropertyDescriptor.prototype.getValue = function() {
return this.value_;
}
PropertyDescriptor.prototype.setEnumerable = function(enumerable) {
this.enumerable_ = enumerable;
}
PropertyDescriptor.prototype.isEnumerable = function () {
return this.enumerable_;
}
PropertyDescriptor.prototype.setWritable = function(writable) {
this.writable_ = writable;
this.hasWritable_ = true;
}
PropertyDescriptor.prototype.isWritable = function() {
return this.writable_;
}
PropertyDescriptor.prototype.setConfigurable = function(configurable) {
this.configurable_ = configurable;
}
PropertyDescriptor.prototype.isConfigurable = function() {
return this.configurable_;
}
PropertyDescriptor.prototype.setGet = function(get) {
this.get_ = get;
this.hasGetter_ = true;
}
PropertyDescriptor.prototype.getGet = function() {
return this.get_;
}
PropertyDescriptor.prototype.setSet = function(set) {
this.set_ = set;
this.hasSetter_ = true;
}
PropertyDescriptor.prototype.getSet = function() {
return this.set_;
}
// ES5 8.12.9. This version cannot cope with the property p already
// being present on obj.
function DefineOwnProperty(obj, p, desc, should_throw) {
var flag = desc.isEnumerable() ? 0 : DONT_ENUM;
if (IsDataDescriptor(desc)) {
flag |= desc.isWritable() ? 0 : (DONT_DELETE | READ_ONLY);
%SetProperty(obj, p, desc.getValue(), flag);
} else {
if (IS_FUNCTION(desc.getGet())) %DefineAccessor(obj, p, GETTER, desc.getGet(), flag);
if (IS_FUNCTION(desc.getSet())) %DefineAccessor(obj, p, SETTER, desc.getSet(), flag);
}
return true;
}
// ES5 section 15.2.3.5.
function ObjectCreate(proto, properties) {
if (!IS_OBJECT(proto) && !IS_NULL(proto)) {
throw MakeTypeError("proto_object_or_null", [proto]);
}
var obj = new $Object();
obj.__proto__ = proto;
if (!IS_UNDEFINED(properties)) ObjectDefineProperties(obj, properties);
return obj;
}
// ES5 section 15.2.3.7. This version cannot cope with the properties already
// being present on obj. Therefore it is not exposed as
// Object.defineProperties yet.
function ObjectDefineProperties(obj, properties) {
var props = ToObject(properties);
var key_values = [];
for (var key in props) {
if (%HasLocalProperty(props, key)) {
key_values.push(key);
var value = props[key];
var desc = ToPropertyDescriptor(value);
key_values.push(desc);
}
}
for (var i = 0; i < key_values.length; i += 2) {
var key = key_values[i];
var desc = key_values[i + 1];
DefineOwnProperty(obj, key, desc, true);
}
}
%SetCode($Object, function(x) {
if (%_IsConstructCall()) {
if (x == null) return this;
@ -511,8 +309,7 @@ function SetupObject() {
"__lookupSetter__", ObjectLookupSetter
));
InstallFunctions($Object, DONT_ENUM, $Array(
"keys", ObjectKeys,
"create", ObjectCreate
"keys", ObjectKeys
));
}

4
deps/v8/src/variables.cc

@ -86,10 +86,10 @@ void UseCount::Print() {
// ----------------------------------------------------------------------------
// Implementation StaticType.
// Implementation SmiAnalysis.
const char* StaticType::Type2String(StaticType* type) {
const char* SmiAnalysis::Type2String(SmiAnalysis* type) {
switch (type->kind_) {
case UNKNOWN:
return "UNKNOWN";

14
deps/v8/src/variables.h

@ -65,14 +65,14 @@ class UseCount BASE_EMBEDDED {
// Variables and AST expression nodes can track their "type" to enable
// optimizations and removal of redundant checks when generating code.
class StaticType {
class SmiAnalysis {
public:
enum Kind {
UNKNOWN,
LIKELY_SMI
};
StaticType() : kind_(UNKNOWN) {}
SmiAnalysis() : kind_(UNKNOWN) {}
bool Is(Kind kind) const { return kind_ == kind; }
@ -80,11 +80,11 @@ class StaticType {
bool IsUnknown() const { return Is(UNKNOWN); }
bool IsLikelySmi() const { return Is(LIKELY_SMI); }
void CopyFrom(StaticType* other) {
void CopyFrom(SmiAnalysis* other) {
kind_ = other->kind_;
}
static const char* Type2String(StaticType* type);
static const char* Type2String(SmiAnalysis* type);
// LIKELY_SMI accessors
void SetAsLikelySmi() {
@ -100,7 +100,7 @@ class StaticType {
private:
Kind kind_;
DISALLOW_COPY_AND_ASSIGN(StaticType);
DISALLOW_COPY_AND_ASSIGN(SmiAnalysis);
};
@ -203,7 +203,7 @@ class Variable: public ZoneObject {
Expression* rewrite() const { return rewrite_; }
Slot* slot() const;
StaticType* type() { return &type_; }
SmiAnalysis* type() { return &type_; }
private:
Scope* scope_;
@ -220,7 +220,7 @@ class Variable: public ZoneObject {
UseCount obj_uses_; // uses of the object the variable points to
// Static type information
StaticType type_;
SmiAnalysis type_;
// Code generation.
// rewrite_ is usually a Slot or a Property, but may be any expression.

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 0
#define BUILD_NUMBER 5
#define BUILD_NUMBER 3
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

276
deps/v8/src/x64/codegen-x64.cc

@ -4051,8 +4051,7 @@ void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
Load(args->at(0));
Load(args->at(1));
StringAddStub stub(NO_STRING_ADD_FLAGS);
Result answer = frame_->CallStub(&stub, 2);
Result answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
frame_->Push(&answer);
}
@ -5127,7 +5126,7 @@ void DeferredInlineBinaryOperation::Generate() {
void CodeGenerator::GenericBinaryOperation(Token::Value op,
StaticType* type,
SmiAnalysis* type,
OverwriteMode overwrite_mode) {
Comment cmnt(masm_, "[ BinaryOperation");
Comment cmnt_token(masm_, Token::String(op));
@ -5316,7 +5315,7 @@ void DeferredInlineSmiOperation::Generate() {
void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> value,
StaticType* type,
SmiAnalysis* type,
bool reversed,
OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
@ -6099,7 +6098,7 @@ void Reference::SetValue(InitState init_state) {
// a loop and the key is likely to be a smi.
Property* property = expression()->AsProperty();
ASSERT(property != NULL);
StaticType* key_smi_analysis = property->key()->type();
SmiAnalysis* key_smi_analysis = property->key()->type();
if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
Comment cmnt(masm, "[ Inlined store to keyed Property");
@ -7372,28 +7371,19 @@ void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
const char* GenericBinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int len = 100;
name_ = Bootstrapper::AllocateAutoDeletedArray(len);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
case NO_OVERWRITE: overwrite_name = "Alloc"; break;
case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
OS::SNPrintF(Vector<char>(name_, len),
"GenericBinaryOpStub_%s_%s%s_%s%s_%s",
op_name,
overwrite_name,
(flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
args_in_registers_ ? "RegArgs" : "StackArgs",
args_reversed_ ? "_R" : "",
use_sse3_ ? "SSE3" : "SSE2");
return name_;
switch (op_) {
case Token::ADD: return "GenericBinaryOpStub_ADD";
case Token::SUB: return "GenericBinaryOpStub_SUB";
case Token::MUL: return "GenericBinaryOpStub_MUL";
case Token::DIV: return "GenericBinaryOpStub_DIV";
case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
case Token::SAR: return "GenericBinaryOpStub_SAR";
case Token::SHL: return "GenericBinaryOpStub_SHL";
case Token::SHR: return "GenericBinaryOpStub_SHR";
default: return "GenericBinaryOpStub";
}
}
@ -7806,8 +7796,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ j(above_equal, &string1);
// First and second argument are strings.
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
__ TailCallStub(&stub);
Runtime::Function* f = Runtime::FunctionForId(Runtime::kStringAdd);
__ TailCallRuntime(ExternalReference(f), 2, f->result_size);
// Only first argument is a string.
__ bind(&string1);
@ -7890,234 +7880,6 @@ int CompareStub::MinorKey() {
return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
}
void StringAddStub::Generate(MacroAssembler* masm) {
Label string_add_runtime;
// Load the two arguments.
__ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument.
__ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
if (string_check_) {
Condition is_smi;
is_smi = masm->CheckSmi(rax);
__ j(is_smi, &string_add_runtime);
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
__ j(above_equal, &string_add_runtime);
// First argument is a string, test second.
is_smi = masm->CheckSmi(rdx);
__ j(is_smi, &string_add_runtime);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
__ j(above_equal, &string_add_runtime);
}
// Both arguments are strings.
// rax: first string
// rdx: second string
// Check if either of the strings are empty. In that case return the other.
Label second_not_zero_length, both_not_zero_length;
__ movl(rcx, FieldOperand(rdx, String::kLengthOffset));
__ testl(rcx, rcx);
__ j(not_zero, &second_not_zero_length);
// Second string is empty, result is first string which is already in rax.
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
__ bind(&second_not_zero_length);
__ movl(rbx, FieldOperand(rax, String::kLengthOffset));
__ testl(rbx, rbx);
__ j(not_zero, &both_not_zero_length);
// First string is empty, result is second string which is in rdx.
__ movq(rax, rdx);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
// Both strings are non-empty.
// rax: first string
// rbx: length of first string
// ecx: length of second string
// edx: second string
// r8: instance type of first string if string check was performed above
// r9: instance type of first string if string check was performed above
Label string_add_flat_result;
__ bind(&both_not_zero_length);
// Look at the length of the result of adding the two strings.
__ addl(rbx, rcx);
// Use the runtime system when adding two one character strings, as it
// contains optimizations for this specific case using the symbol table.
__ cmpl(rbx, Immediate(2));
__ j(equal, &string_add_runtime);
// If arguments were known to be strings, maps are not loaded to r8 and r9
// by the code above.
if (!string_check_) {
__ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
__ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
}
// Get the instance types of the two strings as they will be needed soon.
__ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
__ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
// Check if resulting string will be flat.
__ cmpl(rbx, Immediate(String::kMinNonFlatLength));
__ j(below, &string_add_flat_result);
// Handle exceptionally long strings in the runtime system.
ASSERT((String::kMaxLength & 0x80000000) == 0);
__ cmpl(rbx, Immediate(String::kMaxLength));
__ j(above, &string_add_runtime);
// If result is not supposed to be flat, allocate a cons string object. If
// both strings are ascii the result is an ascii cons string.
// rax: first string
// ebx: length of resulting flat string
// rdx: second string
// r8: instance type of first string
// r9: instance type of second string
Label non_ascii, allocated;
__ movl(rcx, r8);
__ and_(rcx, r9);
ASSERT(kStringEncodingMask == kAsciiStringTag);
__ testl(rcx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii);
// Allocate an ascii cons string.
__ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
__ movl(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
__ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
__ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
__ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
__ movq(rax, rcx);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
// Allocate a two byte cons string.
__ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
__ jmp(&allocated);
// Handle creating a flat result. First check that both strings are not
// external strings.
// rax: first string
// ebx: length of resulting flat string
// rdx: second string
// r8: instance type of first string
// r9: instance type of first string
__ bind(&string_add_flat_result);
__ movl(rcx, r8);
__ and_(rcx, Immediate(kStringRepresentationMask));
__ cmpl(rcx, Immediate(kExternalStringTag));
__ j(equal, &string_add_runtime);
__ movl(rcx, r9);
__ and_(rcx, Immediate(kStringRepresentationMask));
__ cmpl(rcx, Immediate(kExternalStringTag));
__ j(equal, &string_add_runtime);
// Now check if both strings are ascii strings.
// rax: first string
// ebx: length of resulting flat string
// rdx: second string
// r8: instance type of first string
// r9: instance type of second string
Label non_ascii_string_add_flat_result;
ASSERT(kStringEncodingMask == kAsciiStringTag);
__ testl(r8, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii_string_add_flat_result);
__ testl(r9, Immediate(kAsciiStringTag));
__ j(zero, &string_add_runtime);
// Both strings are ascii strings. As they are short they are both flat.
__ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
// rcx: result string
__ movq(rbx, rcx);
// Locate first character of result.
__ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Locate first character of first argument
__ movl(rdi, FieldOperand(rax, String::kLengthOffset));
__ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// rax: first char of first argument
// rbx: result string
// rcx: first character of result
// rdx: second string
// rdi: length of first argument
GenerateCopyCharacters(masm, rcx, rax, rdi, true);
// Locate first character of second argument.
__ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
__ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// rbx: result string
// rcx: next character of result
// rdx: first char of second argument
// rdi: length of second argument
GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
__ movq(rax, rbx);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
// Handle creating a flat two byte result.
// rax: first string - known to be two byte
// rbx: length of resulting flat string
// rdx: second string
// r8: instance type of first string
// r9: instance type of first string
__ bind(&non_ascii_string_add_flat_result);
__ and_(r9, Immediate(kAsciiStringTag));
__ j(not_zero, &string_add_runtime);
// Both strings are two byte strings. As they are short they are both
// flat.
__ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
// rcx: result string
__ movq(rbx, rcx);
// Locate first character of result.
__ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Locate first character of first argument.
__ movl(rdi, FieldOperand(rax, String::kLengthOffset));
__ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// rax: first char of first argument
// rbx: result string
// rcx: first character of result
// rdx: second argument
// rdi: length of first argument
GenerateCopyCharacters(masm, rcx, rax, rdi, false);
// Locate first character of second argument.
__ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
__ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// rbx: result string
// rcx: next character of result
// rdx: first char of second argument
// rdi: length of second argument
GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
__ movq(rax, rbx);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
// Just jump to runtime to add the two strings.
__ bind(&string_add_runtime);
__ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
}
void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
bool ascii) {
Label loop;
__ bind(&loop);
// This loop just copies one character at a time, as it is only used for very
// short strings.
if (ascii) {
__ movb(kScratchRegister, Operand(src, 0));
__ movb(Operand(dest, 0), kScratchRegister);
__ addq(src, Immediate(1));
__ addq(dest, Immediate(1));
} else {
__ movzxwl(kScratchRegister, Operand(src, 0));
__ movw(Operand(dest, 0), kScratchRegister);
__ addq(src, Immediate(2));
__ addq(dest, Immediate(2));
}
__ subl(count, Immediate(1));
__ j(not_zero, &loop);
}
#undef __
#define __ masm.
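The StringAddStub removed above is long; stripped of register allocation, the branch structure it implements reduces to the following C++ sketch (a condensed summary; kMinNonFlatLength and kMaxLength are illustrative stand-ins for the real String constants):

enum StringAddPath {
  kReturnFirst,    // second string is empty
  kReturnSecond,   // first string is empty
  kRuntime,        // fall back to Runtime::kStringAdd
  kConsString,     // long result: build a ConsString
  kFlatCopy        // short result: allocate a flat string and copy characters
};

StringAddPath ClassifyStringAdd(int len1, int len2, bool either_external,
                                int kMinNonFlatLength = 13,
                                int kMaxLength = 0x3fffffff) {
  if (len2 == 0) return kReturnFirst;
  if (len1 == 0) return kReturnSecond;
  int total = len1 + len2;
  if (total == 2) return kRuntime;             // 1+1 chars: symbol table lookup
  if (total >= kMinNonFlatLength) {
    if (total > kMaxLength) return kRuntime;   // exceptionally long strings
    return kConsString;
  }
  if (either_external) return kRuntime;        // flat copy cannot read externals
  return kFlatCopy;
}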

38
deps/v8/src/x64/codegen-x64.h

@ -436,7 +436,7 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(
Token::Value op,
StaticType* type,
SmiAnalysis* type,
OverwriteMode overwrite_mode);
// If possible, combine two constant smi values using op to produce
@ -449,7 +449,7 @@ class CodeGenerator: public AstVisitor {
void ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> constant_operand,
StaticType* type,
SmiAnalysis* type,
bool reversed,
OverwriteMode overwrite_mode);
@ -670,8 +670,7 @@ class GenericBinaryOpStub: public CodeStub {
mode_(mode),
flags_(flags),
args_in_registers_(false),
args_reversed_(false),
name_(NULL) {
args_reversed_(false) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@ -690,7 +689,6 @@ class GenericBinaryOpStub: public CodeStub {
bool args_in_registers_; // Arguments passed in registers not on the stack.
bool args_reversed_; // Left and right argument are swapped.
bool use_sse3_;
char* name_;
const char* GetName();
@ -747,36 +745,6 @@ class GenericBinaryOpStub: public CodeStub {
};
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
};
class StringAddStub: public CodeStub {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
}
private:
Major MajorKey() { return StringAdd; }
int MinorKey() { return string_check_ ? 0 : 1; }
void Generate(MacroAssembler* masm);
void GenerateCopyCharacters(MacroAssembler* masm,
Register desc,
Register src,
Register count,
bool ascii);
// Should the stub check whether arguments are strings?
bool string_check_;
};
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_

354
deps/v8/src/x64/fast-codegen-x64.cc

@ -420,97 +420,73 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
Variable* var = decl->proxy()->var();
ASSERT(var != NULL); // Must have been resolved.
Slot* slot = var->slot();
Property* prop = var->AsProperty();
if (slot != NULL) {
switch (slot->type()) {
case Slot::PARAMETER: // Fall through.
case Slot::LOCAL:
if (decl->mode() == Variable::CONST) {
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
} else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(Operand(rbp, SlotOffset(var->slot())));
}
break;
case Slot::CONTEXT:
// The variable in the decl always resides in the current context.
ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
ASSERT(slot != NULL); // No global declarations here.
// We have 3 cases for slots: LOOKUP, LOCAL, CONTEXT.
switch (slot->type()) {
case Slot::LOOKUP: {
__ push(rsi);
__ Push(var->name());
// Declaration nodes are always introduced in one of two modes.
ASSERT(decl->mode() == Variable::VAR || decl->mode() == Variable::CONST);
PropertyAttributes attr = decl->mode() == Variable::VAR ?
NONE : READ_ONLY;
__ Push(Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (decl->mode() == Variable::CONST) {
__ Push(Factory::the_hole_value());
} else if (decl->fun() != NULL) {
Visit(decl->fun());
} else {
__ Push(Smi::FromInt(0)); // no initial value!
}
__ CallRuntime(Runtime::kDeclareContextSlot, 4);
break;
}
case Slot::LOCAL:
if (decl->mode() == Variable::CONST) {
__ Move(Operand(rbp, SlotOffset(var->slot())),
Factory::the_hole_value());
} else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(Operand(rbp, SlotOffset(var->slot())));
}
break;
case Slot::CONTEXT:
// The variable in the decl always resides in the current context.
ASSERT(function_->scope()->ContextChainLength(slot->var()->scope()) == 0);
if (decl->mode() == Variable::CONST) {
__ Move(rax, Factory::the_hole_value());
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
__ movq(rbx,
CodeGenerator::ContextOperand(rsi, Context::FCONTEXT_INDEX));
__ movq(rbx, CodeGenerator::ContextOperand(rsi,
Context::FCONTEXT_INDEX));
__ cmpq(rbx, rsi);
__ Check(equal, "Unexpected declaration in current context.");
}
if (decl->mode() == Variable::CONST) {
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
kScratchRegister);
// No write barrier since the hole value is in old space.
} else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(rax);
__ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax);
int offset = Context::SlotOffset(slot->index());
__ RecordWrite(rsi, offset, rax, rcx);
}
break;
case Slot::LOOKUP: {
__ push(rsi);
__ Push(var->name());
// Declaration nodes are always introduced in one of two modes.
ASSERT(decl->mode() == Variable::VAR ||
decl->mode() == Variable::CONST);
PropertyAttributes attr =
(decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
__ Push(Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (decl->mode() == Variable::CONST) {
__ PushRoot(Heap::kTheHoleValueRootIndex);
} else if (decl->fun() != NULL) {
Visit(decl->fun());
} else {
__ Push(Smi::FromInt(0)); // no initial value!
}
__ CallRuntime(Runtime::kDeclareContextSlot, 4);
break;
}
}
} else if (prop != NULL) {
if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
ASSERT_EQ(Expression::kValue, prop->obj()->context());
Visit(prop->obj());
ASSERT_EQ(Expression::kValue, prop->key()->context());
Visit(prop->key());
if (decl->fun() != NULL) {
ASSERT_EQ(Expression::kValue, decl->fun()->context());
__ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax);
// No write barrier since the_hole_value is in old space.
ASSERT(!Heap::InNewSpace(*Factory::the_hole_value()));
} else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(rax);
} else {
__ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
__ movq(rbx, CodeGenerator::ContextOperand(rsi,
Context::FCONTEXT_INDEX));
__ cmpq(rbx, rsi);
__ Check(equal, "Unexpected declaration in current context.");
}
__ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax);
int offset = Context::SlotOffset(slot->index());
__ RecordWrite(rsi, offset, rax, rcx);
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// Absence of a test rax instruction following the call
// indicates that none of the load was inlined.
// Value in rax is ignored (declarations are statements). Receiver
// and key on stack are discarded.
__ addq(rsp, Immediate(2 * kPointerSize));
}
break;
default:
UNREACHABLE();
}
}
@ -525,6 +501,20 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
Expression* expr = stmt->expression();
if (expr->AsLiteral() != NULL) {
__ Move(rax, expr->AsLiteral()->handle());
} else {
Visit(expr);
ASSERT_EQ(Expression::kValue, expr->context());
__ pop(rax);
}
EmitReturnSequence(stmt->statement_pos());
}
void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
@ -545,20 +535,14 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr->var(), expr->context());
}
void FastCodeGenerator::EmitVariableLoad(Variable* var,
Expression::Context context) {
Expression* rewrite = var->rewrite();
Expression* rewrite = expr->var()->rewrite();
if (rewrite == NULL) {
ASSERT(var->is_global());
ASSERT(expr->var()->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ push(CodeGenerator::GlobalObject());
__ Move(rcx, var->name());
__ Move(rcx, expr->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
// A test rax instruction following the call is used by the IC to
@ -566,7 +550,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// is no test rax instruction here.
__ nop();
DropAndMove(context, rax);
DropAndMove(expr->context(), rax);
} else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
if (FLAG_debug_code) {
@ -587,7 +571,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
UNREACHABLE();
}
}
Move(context, slot, rax);
Move(expr->context(), slot, rax);
} else {
// A variable has been rewritten into an explicit access to
// an object property.
@ -621,7 +605,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// the call. It is treated specially by the LoadIC code.
// Drop key and object left on the stack by IC, and push the result.
DropAndMove(context, rax, 2);
DropAndMove(expr->context(), rax, 2);
}
}
@ -655,14 +639,31 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Label boilerplate_exists;
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ movq(rbx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
__ movq(rax, FieldOperand(rbx, literal_offset));
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &boilerplate_exists);
// Create boilerplate if it does not exist.
// Literal array (0).
__ push(rbx);
// Literal index (1).
__ Push(Smi::FromInt(expr->literal_index()));
// Constant properties (2).
__ Push(expr->constant_properties());
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 3);
__ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
__ bind(&boilerplate_exists);
// rax contains boilerplate.
// Clone boilerplate.
__ push(rax);
if (expr->depth() == 1) {
__ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
} else {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
__ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
}
// If result_saved == true: The result is saved on top of the
@ -758,14 +759,31 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
Label make_clone;
// Fetch the function's literals array.
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ movq(rbx, FieldOperand(rbx, JSFunction::kLiteralsOffset));
// Check if the literal's boilerplate has been instantiated.
int offset =
FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
__ movq(rax, FieldOperand(rbx, offset));
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &make_clone);
// Instantiate the boilerplate.
__ push(rbx);
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->literals());
__ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
__ bind(&make_clone);
// Clone the boilerplate.
__ push(rax);
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
__ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
} else {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
__ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
}
bool result_saved = false; // Is the result saved to the stack?
@ -835,37 +853,10 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
Expression::Context context) {
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
Move(context, rax);
}
void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
Move(context, rax);
}
void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
Expression::Context context) {
GenericBinaryOpStub stub(op,
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
__ CallStub(&stub);
Move(context, rax);
}
void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
if (var->is_global()) {
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in rax, variable name in
@ -970,6 +961,36 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
UNREACHABLE();
break;
}
} else {
Property* property = var->AsProperty();
ASSERT_NOT_NULL(property);
// A variable has been rewritten into a property on an object.
// Load object and key onto the stack.
Slot* object_slot = property->obj()->AsSlot();
ASSERT_NOT_NULL(object_slot);
Move(Expression::kValue, object_slot, rax);
Literal* key_literal = property->key()->AsLiteral();
ASSERT_NOT_NULL(key_literal);
Move(Expression::kValue, key_literal);
// Value to store was pushed before object and key on the stack.
__ movq(rax, Operand(rsp, 2 * kPointerSize));
// Arguments to ic is value in rax, object and key on stack.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
if (expr->context() == Expression::kEffect) {
__ addq(rsp, Immediate(3 * kPointerSize));
} else if (expr->context() == Expression::kValue) {
// Value is still on the stack in rsp[2 * kPointerSize]
__ addq(rsp, Immediate(2 * kPointerSize));
} else {
__ movq(rax, Operand(rsp, 2 * kPointerSize));
DropAndMove(expr->context(), rax, 3);
}
}
}
@ -1076,9 +1097,7 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
}
void FastCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> ignored,
RelocInfo::Mode mode) {
void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@ -1091,7 +1110,7 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr,
// Call the IC initialization code.
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
__ call(ic, mode);
__ call(ic, reloc_info);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@ -1130,7 +1149,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
__ Push(var->name());
// Push global object as receiver for the call IC lookup.
__ push(CodeGenerator::GlobalObject());
EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// Call to a lookup slot.
@ -1143,7 +1162,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
// Call to a named property, use call IC.
__ Push(key->handle());
Visit(prop->obj());
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
@ -1665,69 +1684,6 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
Move(expr->context(), rax);
}
Register FastCodeGenerator::result_register() { return rax; }
Register FastCodeGenerator::context_register() { return rsi; }
void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset),
static_cast<intptr_t>(frame_offset));
__ movq(Operand(rbp, frame_offset), value);
}
void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
__ movq(dst, CodeGenerator::ContextOperand(rsi, context_index));
}
// ----------------------------------------------------------------------------
// Non-local control flow support.
void FastCodeGenerator::EnterFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Cook return address on top of stack (smi encoded Code* delta)
__ movq(rdx, Operand(rsp, 0));
__ Move(rcx, masm_->CodeObject());
__ subq(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
__ movq(Operand(rsp, 0), rdx);
// Store result register while executing finally block.
__ push(result_register());
}
void FastCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Restore result register from stack.
__ pop(result_register());
// Uncook return address.
__ movq(rdx, Operand(rsp, 0));
__ SmiToInteger32(rdx, rdx);
__ Move(rcx, masm_->CodeObject());
__ addq(rdx, rcx);
__ movq(Operand(rsp, 0), rdx);
// And return.
__ ret(0);
}
void FastCodeGenerator::ThrowException() {
__ push(result_register());
__ CallRuntime(Runtime::kThrow, 1);
}
#undef __
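The EnterFinallyBlock/ExitFinallyBlock pair removed above relies on a "cooked" return address: the absolute address on the stack is replaced by a smi-tagged delta from the code object's start, so a moving collector can relocate the code object while the finally body runs. A hedged sketch of the idea, with plain integers standing in for smis (the real tagging is platform-specific):

#include <cstdint>

// Cook: store the return address as an offset from the code object's start.
uintptr_t CookReturnAddress(uintptr_t return_address, uintptr_t code_start) {
  return return_address - code_start;    // V8 additionally smi-tags this delta
}

// Uncook: rebuild the absolute address from the delta and the code object's
// (possibly relocated) start address.
uintptr_t UncookReturnAddress(uintptr_t delta, uintptr_t new_code_start) {
  return delta + new_code_start;
}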

103
deps/v8/src/x64/ic-x64.cc

@ -48,13 +48,9 @@ namespace internal {
// must always call a backup property load that is complete.
// This function is safe to call if the receiver has fast properties,
// or if name is not a symbol, and will jump to the miss_label in that case.
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss_label,
Register r0,
Register r1,
Register r2,
Register name,
DictionaryCheck check_dictionary) {
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
Register r0, Register r1, Register r2,
Register name) {
// Register use:
//
// r0 - used to hold the property dictionary.
@ -90,14 +86,10 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
__ cmpb(r0, Immediate(JS_BUILTINS_OBJECT_TYPE));
__ j(equal, miss_label);
// Load properties array.
// Check that the properties array is a dictionary.
__ movq(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
if (check_dictionary == CHECK_DICTIONARY) {
// Check that the properties array is a dictionary.
__ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
__ j(not_equal, miss_label);
}
__ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
__ j(not_equal, miss_label);
// Compute the capacity mask.
const int kCapacityOffset =
@ -254,8 +246,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
Label slow, check_string, index_int, index_string;
Label check_pixel_array, probe_dictionary;
Label slow, check_string, index_int, index_string, check_pixel_array;
// Load name and receiver.
__ movq(rax, Operand(rsp, kPointerSize));
@ -328,68 +319,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ movl(rbx, FieldOperand(rax, String::kHashFieldOffset));
__ testl(rbx, Immediate(String::kIsArrayIndexMask));
// Is the string a symbol?
// If the string is a symbol, do a quick inline probe of the receiver's
// dictionary, if it exists.
__ j(not_zero, &index_string); // The value in rbx is used at jump target.
__ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
Immediate(kIsSymbolMask));
__ j(zero, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in rcx.
__ movq(rbx, FieldOperand(rcx, JSObject::kPropertiesOffset));
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), Factory::hash_table_map());
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
__ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
__ movl(rdx, rbx);
__ shr(rdx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
__ shr(rax, Immediate(String::kHashShift));
__ xor_(rdx, rax);
__ and_(rdx, Immediate(KeyedLookupCache::kCapacityMask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
ExternalReference cache_keys
= ExternalReference::keyed_lookup_cache_keys();
__ movq(rdi, rdx);
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ movq(kScratchRegister, cache_keys);
__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
__ j(not_equal, &slow);
__ movq(rdi, Operand(kScratchRegister, rdi, times_1, kPointerSize));
__ cmpq(Operand(rsp, kPointerSize), rdi);
__ j(not_equal, &slow);
// Get field offset which is a 32-bit integer and check that it is
// an in-object property.
ExternalReference cache_field_offsets
= ExternalReference::keyed_lookup_cache_field_offsets();
__ movq(kScratchRegister, cache_field_offsets);
__ movl(rax, Operand(kScratchRegister, rdx, times_4, 0));
__ movzxbq(rdx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
__ cmpq(rax, rdx);
__ j(above_equal, &slow);
// Load in-object property.
__ subq(rax, rdx);
__ movzxbq(rdx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ addq(rax, rdx);
__ movq(rax, FieldOperand(rcx, rax, times_pointer_size, 0));
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
GenerateDictionaryLoad(masm,
&slow,
rbx,
rcx,
rdx,
rax,
DICTIONARY_CHECK_DONE);
// Probe the dictionary leaving result in rcx.
GenerateDictionaryLoad(masm, &slow, rbx, rcx, rdx, rax);
GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
__ movq(rax, rcx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
@ -916,7 +853,9 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
}
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
void CallIC::Generate(MacroAssembler* masm,
int argc,
ExternalReference const& f) {
// Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Get the name of the function to call from the stack.
@ -933,7 +872,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Call the entry.
CEntryStub stub(1);
__ movq(rax, Immediate(2));
__ movq(rbx, ExternalReference(IC_Utility(kCallIC_Miss)));
__ movq(rbx, f);
__ CallStub(&stub);
// Move result to rdi and exit the internal frame.
@ -1024,7 +963,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm, argc);
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
@ -1032,8 +971,8 @@ static void GenerateNormalHelper(MacroAssembler* masm,
int argc,
bool is_global_object,
Label* miss) {
// Search dictionary - put result in register rdx.
GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, CHECK_DICTIONARY);
// Search dictionary - put result in register rdx.
GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx);
// Move the result to register rdi and check that it isn't a smi.
__ movq(rdi, rdx);
@ -1126,7 +1065,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm, argc);
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
@ -1257,9 +1196,9 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &miss);
// Search the dictionary placing the result in rax.
// Search the dictionary placing the result in rax.
__ bind(&probe);
GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx, CHECK_DICTIONARY);
GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx);
GenerateCheckNonObjectOrLoaded(masm, &miss, rax);
__ ret(0);

125
deps/v8/src/x64/macro-assembler-x64.cc

@ -310,12 +310,6 @@ void MacroAssembler::CallStub(CodeStub* stub) {
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
@ -1345,13 +1339,6 @@ void MacroAssembler::Push(Smi* source) {
}
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
addq(rsp, Immediate(stack_elements * kPointerSize));
}
}
void MacroAssembler::Test(const Operand& src, Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
@ -1438,16 +1425,6 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
}
void MacroAssembler::PopTryHandler() {
ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
// Unlink this handler.
movq(kScratchRegister, ExternalReference(Top::k_handler_address));
pop(Operand(kScratchRegister, 0));
// Remove the remaining fields.
addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
void MacroAssembler::Ret() {
ret(0);
}
@ -2267,108 +2244,6 @@ void MacroAssembler::AllocateHeapNumber(Register result,
}
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate two byte string in new space.
AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
times_1,
scratch1,
result,
scratch2,
scratch3,
gc_required,
TAG_OBJECT);
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
movl(FieldOperand(result, String::kLengthOffset), length);
movl(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
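The lea/and pair above simply rounds length * 2 up to the object alignment; the header is asserted to be aligned already, so it can be added as the fixed part of the allocation. A minimal sketch of the same arithmetic, with placeholder constants rather than the real SeqTwoByteString layout:

// Round the character payload up to the object alignment and add the
// already-aligned header. Constants are illustrative only.
static const int kObjectAlignment = 8;
static const int kObjectAlignmentMask = kObjectAlignment - 1;
static const int kTwoByteHeaderSize = 16;  // assumed multiple of the alignment

static int TwoByteStringSize(int length) {
  int payload = (length * 2 + kObjectAlignmentMask) & ~kObjectAlignmentMask;
  return kTwoByteHeaderSize + payload;
}

// For example, 7 characters -> 14 payload bytes -> rounded up to 16 -> 32 total.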
void MacroAssembler::AllocateAsciiString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
movl(scratch1, length);
ASSERT(kCharSize == 1);
addq(scratch1, Immediate(kObjectAlignmentMask));
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate ascii string in new space.
AllocateInNewSpace(SeqAsciiString::kHeaderSize,
times_1,
scratch1,
result,
scratch2,
scratch3,
gc_required,
TAG_OBJECT);
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
movl(FieldOperand(result, String::kLengthOffset), length);
movl(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
void MacroAssembler::AllocateConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
// Allocate cons string in new space.
AllocateInNewSpace(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
// Allocate ascii cons string in new space.
AllocateInNewSpace(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.

35
deps/v8/src/x64/macro-assembler-x64.h

@ -400,7 +400,7 @@ class MacroAssembler: public Assembler {
void Test(const Operand& dst, Smi* source);
// ---------------------------------------------------------------------------
// Macro instructions.
// Macro instructions
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
@ -412,8 +412,6 @@ class MacroAssembler: public Assembler {
void Cmp(Register dst, Handle<Object> source);
void Cmp(const Operand& dst, Handle<Object> source);
void Push(Handle<Object> source);
void Drop(int stack_elements);
void Call(Label* target) { call(target); }
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
@ -445,8 +443,6 @@ class MacroAssembler: public Assembler {
// address must be pushed before calling this helper.
void PushTryHandler(CodeLocation try_location, HandlerType type);
// Unlink the stack handler on top of the stack from the try handler chain.
void PopTryHandler();
// ---------------------------------------------------------------------------
// Inline caching support
@ -522,32 +518,6 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* gc_required);
// Allocate a sequential string. All the header fields of the string object
// are initialized.
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required);
void AllocateAsciiString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required);
// Allocate a raw cons string object. Only the map field of the result is
// initialized.
void AllocateConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required);
// ---------------------------------------------------------------------------
// Support functions.
@ -587,9 +557,6 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub);
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
// Return from a code stub after popping its arguments.
void StubReturn(int argc);

47
deps/v8/src/x64/stub-cache-x64.cc

@ -956,24 +956,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
if (Heap::InNewSpace(function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
__ JumpIfSmi(rdi, &miss);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &miss);
// Check the shared function info. Make sure it hasn't changed.
__ Move(rcx, Handle<SharedFunctionInfo>(function->shared()));
__ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rcx);
__ j(not_equal, &miss);
} else {
__ Cmp(rdi, Handle<JSFunction>(function));
__ j(not_equal, &miss);
}
__ Cmp(rdi, Handle<JSFunction>(function));
__ j(not_equal, &miss);
// Patch the receiver on the stack with the global proxy.
if (object->IsGlobalObject()) {
@ -1003,10 +987,10 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
}
Object* LoadStubCompiler::CompileLoadCallback(String* name,
JSObject* object,
Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
JSObject* holder,
AccessorInfo* callback) {
AccessorInfo* callback,
String* name) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
@ -1015,11 +999,8 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
callback, name, &miss, &failure);
if (!success) return failure;
GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
callback, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@ -1173,11 +1154,8 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
callback, name, &miss, &failure);
if (!success) return failure;
GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
callback, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_callback, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -1632,7 +1610,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
}
bool StubCompiler::GenerateLoadCallback(JSObject* object,
void StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@ -1640,8 +1618,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
Register scratch2,
AccessorInfo* callback,
String* name,
Label* miss,
Failure** failure) {
Label* miss) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
@ -1664,8 +1641,6 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(load_callback_property, 5, 1);
return true;
}

34
deps/v8/test/cctest/test-api.cc

@ -447,40 +447,6 @@ THREADED_TEST(UsingExternalAsciiString) {
}
THREADED_TEST(ScavengeExternalString) {
TestResource::dispose_count = 0;
{
v8::HandleScope scope;
uint16_t* two_byte_string = AsciiToTwoByteString("test string");
Local<String> string =
String::NewExternal(new TestResource(two_byte_string));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
i::Heap::CollectGarbage(0, i::NEW_SPACE);
CHECK(i::Heap::InNewSpace(*istring));
CHECK_EQ(0, TestResource::dispose_count);
}
i::Heap::CollectGarbage(0, i::NEW_SPACE);
CHECK_EQ(1, TestResource::dispose_count);
}
THREADED_TEST(ScavengeExternalAsciiString) {
TestAsciiResource::dispose_count = 0;
{
v8::HandleScope scope;
const char* one_byte_string = "test string";
Local<String> string = String::NewExternal(
new TestAsciiResource(i::StrDup(one_byte_string)));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
i::Heap::CollectGarbage(0, i::NEW_SPACE);
CHECK(i::Heap::InNewSpace(*istring));
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
i::Heap::CollectGarbage(0, i::NEW_SPACE);
CHECK_EQ(1, TestAsciiResource::dispose_count);
}
THREADED_TEST(StringConcat) {
{
v8::HandleScope scope;

33
deps/v8/test/cctest/test-debug.cc

@ -3141,39 +3141,6 @@ TEST(DisableBreak) {
CheckDebuggerUnloaded();
}
static const char* kSimpleExtensionSource =
"(function Foo() {"
" return 4;"
"})() ";
// http://crbug.com/28933
// Test that debug break is disabled when bootstrapper is active.
TEST(NoBreakWhenBootstrapping) {
v8::HandleScope scope;
// Register a debug event listener which sets the break flag and counts.
v8::Debug::SetDebugEventListener(DebugEventCounter);
// Set the debug break flag.
v8::Debug::DebugBreak();
break_point_hit_count = 0;
{
// Create a context with an extension to make sure that some JavaScript
// code is executed during bootstrapping.
v8::RegisterExtension(new v8::Extension("simpletest",
kSimpleExtensionSource));
const char* extension_names[] = { "simpletest" };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
context.Dispose();
}
// Check that no DebugBreak events occurred during the context creation.
CHECK_EQ(0, break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
}
static v8::Handle<v8::Array> NamedEnum(const v8::AccessorInfo&) {
v8::Handle<v8::Array> result = v8::Array::New(3);

12
deps/v8/test/cctest/test-macro-assembler-x64.cc

@ -91,14 +91,14 @@ typedef int (*F0)();
TEST(Smi) {
// Check that C++ Smi operations work as expected.
int64_t test_numbers[] = {
intptr_t test_numbers[] = {
0, 1, -1, 127, 128, -128, -129, 255, 256, -256, -257,
Smi::kMaxValue, static_cast<int64_t>(Smi::kMaxValue) + 1,
Smi::kMinValue, static_cast<int64_t>(Smi::kMinValue) - 1
Smi::kMaxValue, static_cast<intptr_t>(Smi::kMaxValue) + 1,
Smi::kMinValue, static_cast<intptr_t>(Smi::kMinValue) - 1
};
int test_number_count = 15;
for (int i = 0; i < test_number_count; i++) {
int64_t number = test_numbers[i];
intptr_t number = test_numbers[i];
bool is_valid = Smi::IsValid(number);
bool is_in_range = number >= Smi::kMinValue && number <= Smi::kMaxValue;
CHECK_EQ(is_in_range, is_valid);
@ -108,8 +108,8 @@ TEST(Smi) {
Smi* smi_from_int = Smi::FromInt(static_cast<int32_t>(number));
CHECK_EQ(smi_from_int, smi_from_intptr);
}
int64_t smi_value = smi_from_intptr->value();
CHECK_EQ(number, smi_value);
int smi_value = smi_from_intptr->value();
CHECK_EQ(number, static_cast<intptr_t>(smi_value));
}
}
}
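The switch from int64_t to intptr_t above keeps the test meaningful on both 32- and 64-bit builds: Smi::IsValid takes a pointer-sized integer and, as the test checks, accepts exactly the values inside the smi range. A minimal sketch of that range check, with placeholder bounds rather than the build-dependent V8 values:

#include <cstdint>

// Illustrative only: the real kMinValue/kMaxValue depend on the smi encoding
// of the particular build.
static const intptr_t kSmiMaxValue = (static_cast<intptr_t>(1) << 30) - 1;
static const intptr_t kSmiMinValue = -(static_cast<intptr_t>(1) << 30);

static bool SmiIsValid(intptr_t value) {
  return value >= kSmiMinValue && value <= kSmiMaxValue;
}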

35
deps/v8/test/mjsunit/compiler/thisfunction.js

@ -1,35 +0,0 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --always_fast_compiler
// Test reference to this-function.
var g = (function f(x) {
if (x == 1) return 42; else return f(1);
})(0);
assertEquals(42, g);

1
deps/v8/test/mjsunit/fuzz-natives.js

@ -129,6 +129,7 @@ var knownProblems = {
"Log": true,
"DeclareGlobals": true,
"CollectStackTrace": true,
"PromoteScheduledException": true,
"DeleteHandleScopeExtensions": true
};
