
Upgrade V8 to 2.0.5

commit 20b945df70 (v0.7.4-release)
Author: Ryan Dahl
100 changed files (lines changed per file):

 deps/v8/.gitignore                              |    1
 deps/v8/AUTHORS                                 |    3
 deps/v8/ChangeLog                               |   50
 deps/v8/SConstruct                              |   19
 deps/v8/include/v8.h                            |   26
 deps/v8/src/SConscript                          |    7
 deps/v8/src/api.cc                              |   94
 deps/v8/src/arm/assembler-arm.cc                |  186
 deps/v8/src/arm/assembler-arm.h                 |  221
 deps/v8/src/arm/assembler-thumb2-inl.h          |  267
 deps/v8/src/arm/assembler-thumb2.cc             | 1821
 deps/v8/src/arm/assembler-thumb2.h              | 1027
 deps/v8/src/arm/codegen-arm.cc                  |   92
 deps/v8/src/arm/codegen-arm.h                   |   21
 deps/v8/src/arm/disasm-arm.cc                   |   21
 deps/v8/src/arm/fast-codegen-arm.cc             |  350
 deps/v8/src/arm/frames-arm.cc                   |    4
 deps/v8/src/arm/ic-arm.cc                       |   10
 deps/v8/src/arm/macro-assembler-arm.cc          |   30
 deps/v8/src/arm/macro-assembler-arm.h           |    6
 deps/v8/src/arm/simulator-arm.cc                |   20
 deps/v8/src/arm/stub-cache-arm.cc               |   47
 deps/v8/src/arm/virtual-frame-arm.cc            |    3
 deps/v8/src/arm/virtual-frame-arm.h             |    1
 deps/v8/src/assembler.cc                        |   10
 deps/v8/src/assembler.h                         |    4
 deps/v8/src/ast.h                               |   17
 deps/v8/src/bootstrapper.cc                     |   26
 deps/v8/src/bootstrapper.h                      |    4
 deps/v8/src/code-stubs.cc                       |  143
 deps/v8/src/code-stubs.h                        |   19
 deps/v8/src/codegen.h                           |   49
 deps/v8/src/compiler.cc                         |   78
 deps/v8/src/execution.cc                        |    6
 deps/v8/src/factory.cc                          |   15
 deps/v8/src/factory.h                           |    6
 deps/v8/src/fast-codegen.cc                     |  308
 deps/v8/src/fast-codegen.h                      |  195
 deps/v8/src/global-handles.cc                   |   17
 deps/v8/src/globals.h                           |   19
 deps/v8/src/heap-inl.h                          |   69
 deps/v8/src/heap-profiler.cc                    |    5
 deps/v8/src/heap-profiler.h                     |    6
 deps/v8/src/heap.cc                             |  159
 deps/v8/src/heap.h                              |   69
 deps/v8/src/ia32/assembler-ia32.cc              |   11
 deps/v8/src/ia32/assembler-ia32.h               |    1
 deps/v8/src/ia32/builtins-ia32.cc               |   47
 deps/v8/src/ia32/codegen-ia32.cc                |  458
 deps/v8/src/ia32/codegen-ia32.h                 |   25
 deps/v8/src/ia32/disasm-ia32.cc                 |    8
 deps/v8/src/ia32/fast-codegen-ia32.cc           |  381
 deps/v8/src/ia32/ic-ia32.cc                     |  165
 deps/v8/src/ia32/macro-assembler-ia32.cc        |   48
 deps/v8/src/ia32/macro-assembler-ia32.h         |   22
 deps/v8/src/ia32/stub-cache-ia32.cc             |  143
 deps/v8/src/ia32/virtual-frame-ia32.cc          |   11
 deps/v8/src/ia32/virtual-frame-ia32.h           |    6
 deps/v8/src/ic.cc                               |   22
 deps/v8/src/ic.h                                |   24
 deps/v8/src/macro-assembler.h                   |    5
 deps/v8/src/mark-compact.cc                     |   74
 deps/v8/src/math.js                             |   35
 deps/v8/src/messages.js                         |    5
 deps/v8/src/objects-inl.h                       |   12
 deps/v8/src/objects.cc                          |   14
 deps/v8/src/objects.h                           |   47
 deps/v8/src/parser.cc                           |    3
 deps/v8/src/prettyprinter.cc                    |   10
 deps/v8/src/prettyprinter.h                     |    2
 deps/v8/src/rewriter.cc                         |    2
 deps/v8/src/runtime.cc                          |  105
 deps/v8/src/runtime.h                           |    4
 deps/v8/src/runtime.js                          |    6
 deps/v8/src/scopes.cc                           |    3
 deps/v8/src/scopes.h                            |   11
 deps/v8/src/serialize.cc                        |   22
 deps/v8/src/spaces.cc                           |    4
 deps/v8/src/spaces.h                            |   27
 deps/v8/src/stub-cache.cc                       |    4
 deps/v8/src/stub-cache.h                        |   11
 deps/v8/src/token.cc                            |    2
 deps/v8/src/token.h                             |    9
 deps/v8/src/v8-counters.h                       |    2
 deps/v8/src/v8natives.js                        |  207
 deps/v8/src/variables.cc                        |    4
 deps/v8/src/variables.h                         |   14
 deps/v8/src/version.cc                          |    2
 deps/v8/src/x64/codegen-x64.cc                  |  276
 deps/v8/src/x64/codegen-x64.h                   |   38
 deps/v8/src/x64/fast-codegen-x64.cc             |  354
 deps/v8/src/x64/ic-x64.cc                       |  103
 deps/v8/src/x64/macro-assembler-x64.cc          |  125
 deps/v8/src/x64/macro-assembler-x64.h           |   35
 deps/v8/src/x64/stub-cache-x64.cc               |   47
 deps/v8/test/cctest/test-api.cc                 |   34
 deps/v8/test/cctest/test-debug.cc               |   33
 deps/v8/test/cctest/test-macro-assembler-x64.cc |   12
 deps/v8/test/mjsunit/compiler/thisfunction.js   |   35
 deps/v8/test/mjsunit/fuzz-natives.js            |    1

deps/v8/.gitignore (1 line changed)

@@ -14,6 +14,7 @@
 *.pdb
 #*#
 *~
+.cpplint-cache
 d8
 d8_g
 shell

deps/v8/AUTHORS (3 lines changed)

@@ -13,10 +13,11 @@ Daniel James <dnljms@gmail.com>
 Jan de Mooij <jandemooij@gmail.com>
 Jay Freeman <saurik@saurik.com>
 Joel Stanley <joel.stan@gmail.com>
+John Jozwiak <jjozwiak@codeaurora.org>
 Matt Hanselman <mjhanselman@gmail.com>
 Paolo Giarrusso <p.giarrusso@gmail.com>
 Rafal Krypa <rafal@krypa.net>
 Rene Rebe <rene@exactcode.de>
 Ryan Dahl <coldredlemur@gmail.com>
 Patrick Gansterer <paroga@paroga.com>
-John Jozwiak <jjozwiak@codeaurora.org>
+Subrato K De <subratokde@codeaurora.org>

deps/v8/ChangeLog (50 lines changed)

@@ -1,3 +1,35 @@
+2009-12-18: Version 2.0.5
+
+        Extended to upper limit of map space to allow for 7 times as many map
+        to be allocated (issue 524).
+
+        Improved performance of code using closures.
+
+        Improved performance of some binary operations involving doubles.
+
+
+2009-12-16: Version 2.0.4
+
+        Added ECMAScript 5 Object.create.
+
+        Improved performance of Math.max and Math.min.
+
+        Optimized adding of strings on 64-bit platforms.
+
+        Improved handling of external strings by using a separate table
+        instead of weak handles.  This improves garbage collection
+        performance and uses less memory.
+
+        Changed code generation for object and array literals in toplevel
+        code to be more compact by doing more work in the runtime.
+
+        Fixed a crash bug triggered when garbage collection happened during
+        generation of a callback load inline cache stub.
+
+        Fixed crash bug sometimes triggered when local variables shadowed
+        parameters in functions that used the arguments object.
+
+
 2009-12-03: Version 2.0.3

         Optimized handling and adding of strings, for-in and Array.join.

@@ -35,7 +67,7 @@
         Reverted a change which caused Chromium interactive ui test
         failures.


 2009-11-18: Version 2.0.0

         Added support for VFP on ARM.

@@ -80,7 +112,7 @@
 2009-10-16: Version 1.3.16

         X64: Convert smis to holding 32 bits of payload.

         Introduce v8::Integer::NewFromUnsigned method.

@@ -225,7 +257,7 @@
         notifications when V8 has not yet been initialized.

         Fixed ARM simulator compilation problem on Windows.


 2009-08-25: Version 1.3.7

@@ -340,9 +372,9 @@
         function is a built-in.

         Initial implementation of constructor heap profile for JS objects.

         More fine grained control of profiling aspects through the API.

         Optimized the called as constructor check for API calls.

@@ -367,8 +399,8 @@
         Added an external allocation limit to avoid issues where small V8
         objects would hold on to large amounts of external memory without
         causing garbage collections.

         Finished more of the inline caching stubs for x64 targets.


 2009-07-13: Version 1.2.14

@@ -448,9 +480,9 @@
         Fixed a bug in the string type inference.

         Fixed a bug in the handling of 'constant function' properties.

         Improved overall performance.


 2009-06-16: Version 1.2.8

deps/v8/SConstruct (19 lines changed)

@@ -143,6 +143,9 @@ LIBRARY_FLAGS = {
     },
     'os:macos': {
       'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
+      'library:shared': {
+        'CPPDEFINES': ['V8_SHARED']
+      }
     },
     'os:freebsd': {
       'CPPPATH' : ['/usr/local/include'],

@@ -178,6 +181,12 @@ LIBRARY_FLAGS = {
       'CCFLAGS': ['-m32'],
       'LINKFLAGS': ['-m32']
     },
+    'armvariant:thumb2': {
+      'CPPDEFINES': ['V8_ARM_VARIANT_THUMB']
+    },
+    'armvariant:arm': {
+      'CPPDEFINES': ['V8_ARM_VARIANT_ARM']
+    },
    'arch:x64': {
      'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
      'CCFLAGS': ['-m64'],

@@ -243,6 +252,7 @@ V8_EXTRA_FLAGS = {
   'gcc': {
     'all': {
       'WARNINGFLAGS': ['-Wall',
+                       '-Werror',
                        '-W',
                        '-Wno-unused-parameter',
                        '-Wnon-virtual-dtor']

@@ -655,6 +665,11 @@ SIMPLE_OPTIONS = {
     'values': ['default', 'hidden'],
     'default': 'hidden',
     'help': 'shared library symbol visibility'
-  }
+  },
+  'armvariant': {
+    'values': ['arm', 'thumb2', 'none'],
+    'default': 'none',
+    'help': 'generate thumb2 instructions instead of arm instructions (default)'
+  }
 }

@@ -838,6 +853,10 @@ def PostprocessOptions(options):
     # Print a warning if profiling is enabled without profiling support
     print "Warning: forcing profilingsupport on when prof is on"
     options['profilingsupport'] = 'on'
+  if (options['armvariant'] == 'none' and options['arch'] == 'arm'):
+    options['armvariant'] = 'arm'
+  if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
+    options['armvariant'] = 'none'


 def ParseEnvOverrides(arg, imports):
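
With these options in place, the assembler variant is chosen at configure time: PostprocessOptions defaults armvariant to 'arm' for ARM targets and forces it to 'none' everywhere else, so presumably a Thumb-2 build is requested with "scons arch=arm armvariant=thumb2" while a plain "scons arch=arm" keeps the classic ARM encoder. The SConscript change below shows which assembler source file each variant pulls in.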

deps/v8/include/v8.h (26 lines changed)

@@ -833,13 +833,26 @@ class V8EXPORT String : public Primitive {
    * Returns true if the string is both external and ascii
    */
   bool IsExternalAscii() const;
+
+  class V8EXPORT ExternalStringResourceBase {
+   public:
+    virtual ~ExternalStringResourceBase() {}
+   protected:
+    ExternalStringResourceBase() {}
+   private:
+    // Disallow copying and assigning.
+    ExternalStringResourceBase(const ExternalStringResourceBase&);
+    void operator=(const ExternalStringResourceBase&);
+  };
+
   /**
    * An ExternalStringResource is a wrapper around a two-byte string
    * buffer that resides outside V8's heap. Implement an
    * ExternalStringResource to manage the life cycle of the underlying
    * buffer. Note that the string data must be immutable.
    */
-  class V8EXPORT ExternalStringResource {  // NOLINT
+  class V8EXPORT ExternalStringResource
+      : public ExternalStringResourceBase {
   public:
   /**
    * Override the destructor to manage the life cycle of the underlying

@@ -852,10 +865,6 @@ class V8EXPORT String : public Primitive {
     virtual size_t length() const = 0;
    protected:
     ExternalStringResource() {}
-   private:
-    // Disallow copying and assigning.
-    ExternalStringResource(const ExternalStringResource&);
-    void operator=(const ExternalStringResource&);
   };

   /**

@@ -869,7 +878,8 @@ class V8EXPORT String : public Primitive {
    * Use String::New or convert to 16 bit data for non-ASCII.
    */
-  class V8EXPORT ExternalAsciiStringResource {  // NOLINT
+  class V8EXPORT ExternalAsciiStringResource
+      : public ExternalStringResourceBase {
   public:
   /**
    * Override the destructor to manage the life cycle of the underlying

@@ -882,10 +892,6 @@ class V8EXPORT String : public Primitive {
     virtual size_t length() const = 0;
    protected:
     ExternalAsciiStringResource() {}
-   private:
-    // Disallow copying and assigning.
-    ExternalAsciiStringResource(const ExternalAsciiStringResource&);
-    void operator=(const ExternalAsciiStringResource&);
   };

   /**
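
The practical effect of this hunk: both resource types now share one base class that suppresses copying and assignment, while the virtual interface user code implements is unchanged. A minimal sketch of an ASCII resource under the new hierarchy (the class name and the static-buffer assumption are illustrative, not part of the commit; assumes v8.h of this era is included):

  #include <v8.h>

  // Illustrative only: a resource over a static, immutable buffer.
  class StaticAsciiResource : public v8::String::ExternalAsciiStringResource {
   public:
    StaticAsciiResource(const char* data, size_t length)
        : data_(data), length_(length) {}
    virtual ~StaticAsciiResource() {}  // Nothing to free for static data.
    virtual const char* data() const { return data_; }
    virtual size_t length() const { return length_; }
   private:
    const char* data_;   // Must stay valid and immutable while V8 holds it.
    size_t length_;
  };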

deps/v8/src/SConscript (7 lines changed)

@@ -106,7 +106,6 @@ SOURCES = {
     zone.cc
     """),
   'arch:arm': Split("""
-    arm/assembler-arm.cc
     arm/builtins-arm.cc
     arm/codegen-arm.cc
     arm/constants-arm.cc

@@ -123,6 +122,12 @@ SOURCES = {
     arm/stub-cache-arm.cc
     arm/virtual-frame-arm.cc
     """),
+  'armvariant:arm': Split("""
+    arm/assembler-arm.cc
+    """),
+  'armvariant:thumb2': Split("""
+    arm/assembler-thumb2.cc
+    """),
   'arch:ia32': Split("""
     ia32/assembler-ia32.cc
     ia32/builtins-ia32.cc

deps/v8/src/api.cc (94 lines changed)

@@ -3082,81 +3082,13 @@ i::Handle<i::String> NewExternalAsciiStringHandle(
 }


-static void DisposeExternalString(v8::Persistent<v8::Value> obj,
-                                  void* parameter) {
-  ENTER_V8;
-  i::ExternalTwoByteString* str =
-      i::ExternalTwoByteString::cast(*Utils::OpenHandle(*obj));
-  // External symbols are deleted when they are pruned out of the symbol
-  // table. Generally external symbols are not registered with the weak handle
-  // callbacks unless they are upgraded to a symbol after being externalized.
-  if (!str->IsSymbol()) {
-    v8::String::ExternalStringResource* resource =
-        reinterpret_cast<v8::String::ExternalStringResource*>(parameter);
-    if (resource != NULL) {
-      const int total_size =
-          static_cast<int>(resource->length() * sizeof(*resource->data()));
-      i::Counters::total_external_string_memory.Decrement(total_size);
-
-      // The object will continue to live in the JavaScript heap until the
-      // handle is entirely cleaned out by the next GC. For example the
-      // destructor for the resource below could bring it back to life again.
-      // Which is why we make sure to not have a dangling pointer here.
-      str->set_resource(NULL);
-      delete resource;
-    }
-  }
-
-  // In any case we do not need this handle any longer.
-  obj.Dispose();
-}
-
-
-static void DisposeExternalAsciiString(v8::Persistent<v8::Value> obj,
-                                       void* parameter) {
-  ENTER_V8;
-  i::ExternalAsciiString* str =
-      i::ExternalAsciiString::cast(*Utils::OpenHandle(*obj));
-  // External symbols are deleted when they are pruned out of the symbol
-  // table. Generally external symbols are not registered with the weak handle
-  // callbacks unless they are upgraded to a symbol after being externalized.
-  if (!str->IsSymbol()) {
-    v8::String::ExternalAsciiStringResource* resource =
-        reinterpret_cast<v8::String::ExternalAsciiStringResource*>(parameter);
-    if (resource != NULL) {
-      const int total_size =
-          static_cast<int>(resource->length() * sizeof(*resource->data()));
-      i::Counters::total_external_string_memory.Decrement(total_size);
-
-      // The object will continue to live in the JavaScript heap until the
-      // handle is entirely cleaned out by the next GC. For example the
-      // destructor for the resource below could bring it back to life again.
-      // Which is why we make sure to not have a dangling pointer here.
-      str->set_resource(NULL);
-      delete resource;
-    }
-  }
-
-  // In any case we do not need this handle any longer.
-  obj.Dispose();
-}
-
-
 Local<String> v8::String::NewExternal(
       v8::String::ExternalStringResource* resource) {
   EnsureInitialized("v8::String::NewExternal()");
   LOG_API("String::NewExternal");
   ENTER_V8;
-  const int total_size =
-      static_cast<int>(resource->length() * sizeof(*resource->data()));
-  i::Counters::total_external_string_memory.Increment(total_size);
   i::Handle<i::String> result = NewExternalStringHandle(resource);
-  i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
-  i::GlobalHandles::MakeWeak(handle.location(),
-                             resource,
-                             &DisposeExternalString);
+  i::ExternalStringTable::AddString(*result);
   return Utils::ToLocal(result);
 }

@@ -3168,13 +3100,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
   i::Handle<i::String> obj = Utils::OpenHandle(this);
   bool result = obj->MakeExternal(resource);
   if (result && !obj->IsSymbol()) {
-    // Operation was successful and the string is not a symbol. In this case
-    // we need to make sure that the we call the destructor for the external
-    // resource when no strong references to the string remain.
-    i::Handle<i::Object> handle = i::GlobalHandles::Create(*obj);
-    i::GlobalHandles::MakeWeak(handle.location(),
-                               resource,
-                               &DisposeExternalString);
+    i::ExternalStringTable::AddString(*obj);
   }
   return result;
 }

@@ -3185,14 +3111,8 @@ Local<String> v8::String::NewExternal(
   EnsureInitialized("v8::String::NewExternal()");
   LOG_API("String::NewExternal");
   ENTER_V8;
-  const int total_size =
-      static_cast<int>(resource->length() * sizeof(*resource->data()));
-  i::Counters::total_external_string_memory.Increment(total_size);
   i::Handle<i::String> result = NewExternalAsciiStringHandle(resource);
-  i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
-  i::GlobalHandles::MakeWeak(handle.location(),
-                             resource,
-                             &DisposeExternalAsciiString);
+  i::ExternalStringTable::AddString(*result);
   return Utils::ToLocal(result);
 }

@@ -3205,13 +3125,7 @@ bool v8::String::MakeExternal(
   i::Handle<i::String> obj = Utils::OpenHandle(this);
   bool result = obj->MakeExternal(resource);
   if (result && !obj->IsSymbol()) {
-    // Operation was successful and the string is not a symbol. In this case
-    // we need to make sure that the we call the destructor for the external
-    // resource when no strong references to the string remain.
-    i::Handle<i::Object> handle = i::GlobalHandles::Create(*obj);
-    i::GlobalHandles::MakeWeak(handle.location(),
-                               resource,
-                               &DisposeExternalAsciiString);
+    i::ExternalStringTable::AddString(*obj);
   }
   return result;
 }
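
In short, externalized strings are now recorded in a single external string table and their resources are deleted when the collector prunes that table, replacing the per-string weak global handle and the eager memory-counter bookkeeping (this is the ChangeLog's "separate table instead of weak handles" item). A hedged usage sketch, reusing the hypothetical StaticAsciiResource from the v8.h section above; ownership of the resource still passes to V8:

  v8::HandleScope scope;
  static const char kText[] = "hello";
  v8::Local<v8::String> s = v8::String::NewExternal(
      new StaticAsciiResource(kText, sizeof(kText) - 1));
  // V8 now owns the resource; it is deleted when the string is garbage
  // collected and pruned from the external string table, not via a
  // weak-handle callback.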

deps/v8/src/arm/assembler-arm.cc (186 lines changed)

@@ -114,55 +114,55 @@ CRegister cr15 = { 15 };
 // Support for the VFP registers s0 to s31 (d0 to d15).
 // Note that "sN:sM" is the same as "dN/2".
-Register s0 = { 0 };
-Register s1 = { 1 };
-Register s2 = { 2 };
-Register s3 = { 3 };
-Register s4 = { 4 };
-Register s5 = { 5 };
-Register s6 = { 6 };
-Register s7 = { 7 };
-Register s8 = { 8 };
-Register s9 = { 9 };
-Register s10 = { 10 };
-Register s11 = { 11 };
-Register s12 = { 12 };
-Register s13 = { 13 };
-Register s14 = { 14 };
-Register s15 = { 15 };
-Register s16 = { 16 };
-Register s17 = { 17 };
-Register s18 = { 18 };
-Register s19 = { 19 };
-Register s20 = { 20 };
-Register s21 = { 21 };
-Register s22 = { 22 };
-Register s23 = { 23 };
-Register s24 = { 24 };
-Register s25 = { 25 };
-Register s26 = { 26 };
-Register s27 = { 27 };
-Register s28 = { 28 };
-Register s29 = { 29 };
-Register s30 = { 30 };
-Register s31 = { 31 };
-Register d0 = { 0 };
-Register d1 = { 1 };
-Register d2 = { 2 };
-Register d3 = { 3 };
-Register d4 = { 4 };
-Register d5 = { 5 };
-Register d6 = { 6 };
-Register d7 = { 7 };
-Register d8 = { 8 };
-Register d9 = { 9 };
-Register d10 = { 10 };
-Register d11 = { 11 };
-Register d12 = { 12 };
-Register d13 = { 13 };
-Register d14 = { 14 };
-Register d15 = { 15 };
+SwVfpRegister s0 = { 0 };
+SwVfpRegister s1 = { 1 };
+SwVfpRegister s2 = { 2 };
+SwVfpRegister s3 = { 3 };
+SwVfpRegister s4 = { 4 };
+SwVfpRegister s5 = { 5 };
+SwVfpRegister s6 = { 6 };
+SwVfpRegister s7 = { 7 };
+SwVfpRegister s8 = { 8 };
+SwVfpRegister s9 = { 9 };
+SwVfpRegister s10 = { 10 };
+SwVfpRegister s11 = { 11 };
+SwVfpRegister s12 = { 12 };
+SwVfpRegister s13 = { 13 };
+SwVfpRegister s14 = { 14 };
+SwVfpRegister s15 = { 15 };
+SwVfpRegister s16 = { 16 };
+SwVfpRegister s17 = { 17 };
+SwVfpRegister s18 = { 18 };
+SwVfpRegister s19 = { 19 };
+SwVfpRegister s20 = { 20 };
+SwVfpRegister s21 = { 21 };
+SwVfpRegister s22 = { 22 };
+SwVfpRegister s23 = { 23 };
+SwVfpRegister s24 = { 24 };
+SwVfpRegister s25 = { 25 };
+SwVfpRegister s26 = { 26 };
+SwVfpRegister s27 = { 27 };
+SwVfpRegister s28 = { 28 };
+SwVfpRegister s29 = { 29 };
+SwVfpRegister s30 = { 30 };
+SwVfpRegister s31 = { 31 };
+DwVfpRegister d0 = { 0 };
+DwVfpRegister d1 = { 1 };
+DwVfpRegister d2 = { 2 };
+DwVfpRegister d3 = { 3 };
+DwVfpRegister d4 = { 4 };
+DwVfpRegister d5 = { 5 };
+DwVfpRegister d6 = { 6 };
+DwVfpRegister d7 = { 7 };
+DwVfpRegister d8 = { 8 };
+DwVfpRegister d9 = { 9 };
+DwVfpRegister d10 = { 10 };
+DwVfpRegister d11 = { 11 };
+DwVfpRegister d12 = { 12 };
+DwVfpRegister d13 = { 13 };
+DwVfpRegister d14 = { 14 };
+DwVfpRegister d15 = { 15 };

 // -----------------------------------------------------------------------------
 // Implementation of RelocInfo

@@ -1371,11 +1371,10 @@ void Assembler::stc2(Coprocessor coproc,
 // Support for VFP.
-void Assembler::fmdrr(const Register dst,
-                      const Register src1,
-                      const Register src2,
-                      const SBit s,
-                      const Condition cond) {
+void Assembler::vmov(const DwVfpRegister dst,
+                     const Register src1,
+                     const Register src2,
+                     const Condition cond) {
   // Dm = <Rt,Rt2>.
   // Instruction details available in ARM DDI 0406A, A8-646.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |

@@ -1387,11 +1386,10 @@ void Assembler::fmdrr(const Register dst,
 }

-void Assembler::fmrrd(const Register dst1,
-                      const Register dst2,
-                      const Register src,
-                      const SBit s,
-                      const Condition cond) {
+void Assembler::vmov(const Register dst1,
+                     const Register dst2,
+                     const DwVfpRegister src,
+                     const Condition cond) {
   // <Rt,Rt2> = Dm.
   // Instruction details available in ARM DDI 0406A, A8-646.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |

@@ -1403,9 +1401,8 @@ void Assembler::fmrrd(const Register dst1,
 }

-void Assembler::fmsr(const Register dst,
-                     const Register src,
-                     const SBit s,
-                     const Condition cond) {
+void Assembler::vmov(const SwVfpRegister dst,
+                     const Register src,
+                     const Condition cond) {
   // Sn = Rt.
   // Instruction details available in ARM DDI 0406A, A8-642.

@@ -1418,9 +1415,8 @@ void Assembler::fmsr(const Register dst,
 }

-void Assembler::fmrs(const Register dst,
-                     const Register src,
-                     const SBit s,
-                     const Condition cond) {
+void Assembler::vmov(const Register dst,
+                     const SwVfpRegister src,
+                     const Condition cond) {
   // Rt = Sn.
   // Instruction details available in ARM DDI 0406A, A8-642.

@@ -1433,10 +1429,9 @@ void Assembler::fmrs(const Register dst,
 }

-void Assembler::fsitod(const Register dst,
-                       const Register src,
-                       const SBit s,
-                       const Condition cond) {
+void Assembler::vcvt(const DwVfpRegister dst,
+                     const SwVfpRegister src,
+                     const Condition cond) {
   // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
   // Instruction details available in ARM DDI 0406A, A8-576.
   // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |

@@ -1448,10 +1443,9 @@ void Assembler::fsitod(const Register dst,
 }

-void Assembler::ftosid(const Register dst,
-                       const Register src,
-                       const SBit s,
-                       const Condition cond) {
+void Assembler::vcvt(const SwVfpRegister dst,
+                     const DwVfpRegister src,
+                     const Condition cond) {
   // Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
   // Instruction details available in ARM DDI 0406A, A8-576.
   // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|

@@ -1463,12 +1457,11 @@ void Assembler::ftosid(const Register dst,
 }

-void Assembler::faddd(const Register dst,
-                      const Register src1,
-                      const Register src2,
-                      const SBit s,
-                      const Condition cond) {
-  // Dd = faddd(Dn, Dm) double precision floating point addition.
+void Assembler::vadd(const DwVfpRegister dst,
+                     const DwVfpRegister src1,
+                     const DwVfpRegister src2,
+                     const Condition cond) {
+  // Dd = vadd(Dn, Dm) double precision floating point addition.
   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
   // Instruction details available in ARM DDI 0406A, A8-536.
   // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |

@@ -1479,12 +1472,11 @@ void Assembler::faddd(const Register dst,
 }

-void Assembler::fsubd(const Register dst,
-                      const Register src1,
-                      const Register src2,
-                      const SBit s,
-                      const Condition cond) {
-  // Dd = fsubd(Dn, Dm) double precision floating point subtraction.
+void Assembler::vsub(const DwVfpRegister dst,
+                     const DwVfpRegister src1,
+                     const DwVfpRegister src2,
+                     const Condition cond) {
+  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |

@@ -1495,12 +1487,11 @@ void Assembler::fsubd(const Register dst,
 }

-void Assembler::fmuld(const Register dst,
-                      const Register src1,
-                      const Register src2,
-                      const SBit s,
-                      const Condition cond) {
-  // Dd = fmuld(Dn, Dm) double precision floating point multiplication.
+void Assembler::vmul(const DwVfpRegister dst,
+                     const DwVfpRegister src1,
+                     const DwVfpRegister src2,
+                     const Condition cond) {
+  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |

@@ -1511,12 +1502,11 @@ void Assembler::fmuld(const Register dst,
 }

-void Assembler::fdivd(const Register dst,
-                      const Register src1,
-                      const Register src2,
-                      const SBit s,
-                      const Condition cond) {
-  // Dd = fdivd(Dn, Dm) double precision floating point division.
+void Assembler::vdiv(const DwVfpRegister dst,
+                     const DwVfpRegister src1,
+                     const DwVfpRegister src2,
+                     const Condition cond) {
+  // Dd = vdiv(Dn, Dm) double precision floating point division.
   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
   // Instruction details available in ARM DDI 0406A, A8-584.
   // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |

@@ -1527,8 +1517,8 @@ void Assembler::fdivd(const Register dst,
 }

-void Assembler::fcmp(const Register src1,
-                     const Register src2,
+void Assembler::vcmp(const DwVfpRegister src1,
+                     const DwVfpRegister src2,
                      const SBit s,
                      const Condition cond) {
   // vcmp(Dd, Dm) double precision floating point comparison.

deps/v8/src/arm/assembler-arm.h (221 lines changed)

@@ -103,57 +103,94 @@ extern Register sp;
 extern Register lr;
 extern Register pc;

-// Support for VFP registers s0 to s32 (d0 to d16).
-// Note that "sN:sM" is the same as "dN/2".
-extern Register s0;
-extern Register s1;
-extern Register s2;
-extern Register s3;
-extern Register s4;
-extern Register s5;
-extern Register s6;
-extern Register s7;
-extern Register s8;
-extern Register s9;
-extern Register s10;
-extern Register s11;
-extern Register s12;
-extern Register s13;
-extern Register s14;
-extern Register s15;
-extern Register s16;
-extern Register s17;
-extern Register s18;
-extern Register s19;
-extern Register s20;
-extern Register s21;
-extern Register s22;
-extern Register s23;
-extern Register s24;
-extern Register s25;
-extern Register s26;
-extern Register s27;
-extern Register s28;
-extern Register s29;
-extern Register s30;
-extern Register s31;
-extern Register d0;
-extern Register d1;
-extern Register d2;
-extern Register d3;
-extern Register d4;
-extern Register d5;
-extern Register d6;
-extern Register d7;
-extern Register d8;
-extern Register d9;
-extern Register d10;
-extern Register d11;
-extern Register d12;
-extern Register d13;
-extern Register d14;
-extern Register d15;
+// Single word VFP register.
+struct SwVfpRegister {
+  bool is_valid() const { return 0 <= code_ && code_ < 32; }
+  bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
+  int code() const {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  int code_;
+};
+
+
+// Double word VFP register.
+struct DwVfpRegister {
+  // Supporting d0 to d15, can be later extended to d31.
+  bool is_valid() const { return 0 <= code_ && code_ < 16; }
+  bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+  int code() const {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const {
+    ASSERT(is_valid());
+    return 1 << code_;
+  }
+
+  int code_;
+};
+
+
+// Support for VFP registers s0 to s31 (d0 to d15).
+// Note that "s(N):s(N+1)" is the same as "d(N/2)".
+extern SwVfpRegister s0;
+extern SwVfpRegister s1;
+extern SwVfpRegister s2;
+extern SwVfpRegister s3;
+extern SwVfpRegister s4;
+extern SwVfpRegister s5;
+extern SwVfpRegister s6;
+extern SwVfpRegister s7;
+extern SwVfpRegister s8;
+extern SwVfpRegister s9;
+extern SwVfpRegister s10;
+extern SwVfpRegister s11;
+extern SwVfpRegister s12;
+extern SwVfpRegister s13;
+extern SwVfpRegister s14;
+extern SwVfpRegister s15;
+extern SwVfpRegister s16;
+extern SwVfpRegister s17;
+extern SwVfpRegister s18;
+extern SwVfpRegister s19;
+extern SwVfpRegister s20;
+extern SwVfpRegister s21;
+extern SwVfpRegister s22;
+extern SwVfpRegister s23;
+extern SwVfpRegister s24;
+extern SwVfpRegister s25;
+extern SwVfpRegister s26;
+extern SwVfpRegister s27;
+extern SwVfpRegister s28;
+extern SwVfpRegister s29;
+extern SwVfpRegister s30;
+extern SwVfpRegister s31;
+extern DwVfpRegister d0;
+extern DwVfpRegister d1;
+extern DwVfpRegister d2;
+extern DwVfpRegister d3;
+extern DwVfpRegister d4;
+extern DwVfpRegister d5;
+extern DwVfpRegister d6;
+extern DwVfpRegister d7;
+extern DwVfpRegister d8;
+extern DwVfpRegister d9;
+extern DwVfpRegister d10;
+extern DwVfpRegister d11;
+extern DwVfpRegister d12;
+extern DwVfpRegister d13;
+extern DwVfpRegister d14;
+extern DwVfpRegister d15;

 // Coprocessor register
 struct CRegister {

@@ -759,55 +796,45 @@ class Assembler : public Malloced {
   // However, some simple modifications can allow
   // these APIs to support D16 to D31.

-  void fmdrr(const Register dst,
-             const Register src1,
-             const Register src2,
-             const SBit s = LeaveCC,
-             const Condition cond = al);
-  void fmrrd(const Register dst1,
-             const Register dst2,
-             const Register src,
-             const SBit s = LeaveCC,
-             const Condition cond = al);
-  void fmsr(const Register dst,
-            const Register src,
-            const SBit s = LeaveCC,
-            const Condition cond = al);
-  void fmrs(const Register dst,
-            const Register src,
-            const SBit s = LeaveCC,
-            const Condition cond = al);
-  void fsitod(const Register dst,
-              const Register src,
-              const SBit s = LeaveCC,
-              const Condition cond = al);
-  void ftosid(const Register dst,
-              const Register src,
-              const SBit s = LeaveCC,
-              const Condition cond = al);
+  void vmov(const DwVfpRegister dst,
+            const Register src1,
+            const Register src2,
+            const Condition cond = al);
+  void vmov(const Register dst1,
+            const Register dst2,
+            const DwVfpRegister src,
+            const Condition cond = al);
+  void vmov(const SwVfpRegister dst,
+            const Register src,
+            const Condition cond = al);
+  void vmov(const Register dst,
+            const SwVfpRegister src,
+            const Condition cond = al);
+  void vcvt(const DwVfpRegister dst,
+            const SwVfpRegister src,
+            const Condition cond = al);
+  void vcvt(const SwVfpRegister dst,
+            const DwVfpRegister src,
+            const Condition cond = al);

-  void faddd(const Register dst,
-             const Register src1,
-             const Register src2,
-             const SBit s = LeaveCC,
-             const Condition cond = al);
-  void fsubd(const Register dst,
-             const Register src1,
-             const Register src2,
-             const SBit s = LeaveCC,
-             const Condition cond = al);
-  void fmuld(const Register dst,
-             const Register src1,
-             const Register src2,
-             const SBit s = LeaveCC,
-             const Condition cond = al);
-  void fdivd(const Register dst,
-             const Register src1,
-             const Register src2,
-             const SBit s = LeaveCC,
-             const Condition cond = al);
-  void fcmp(const Register src1,
-            const Register src2,
-            const SBit s = LeaveCC,
-            const Condition cond = al);
+  void vadd(const DwVfpRegister dst,
+            const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const Condition cond = al);
+  void vsub(const DwVfpRegister dst,
+            const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const Condition cond = al);
+  void vmul(const DwVfpRegister dst,
+            const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const Condition cond = al);
+  void vdiv(const DwVfpRegister dst,
+            const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const Condition cond = al);
+  void vcmp(const DwVfpRegister src1,
+            const DwVfpRegister src2,
+            const SBit s = LeaveCC,
+            const Condition cond = al);
   void vmrs(const Register dst,
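
The net effect of this hunk: the old F* mnemonics become vmov/vcvt/vadd/vsub/vmul/vdiv/vcmp overloads that are type-checked against the new SwVfpRegister/DwVfpRegister structs, and the (previously unused) SBit parameter survives only on vcmp. A small fragment in the commit's MacroAssembler style, mirroring the CompareStub and binary-op changes in codegen-arm.cc further down (register choices illustrative; __ is V8's usual masm shorthand):

  __ vmov(d6, r0, r1);   // was fmdrr: d6 = <r1:r0>
  __ vmov(d7, r2, r3);   // was fmdrr
  __ vadd(d5, d6, d7);   // was faddd: d5 = d6 + d7 (no SBit argument)
  __ vmov(r0, r1, d5);   // was fmrrd: result back to core registers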

deps/v8/src/arm/assembler-thumb2-inl.h (267 lines changed; new file)

@@ -0,0 +1,267 @@
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license above has been modified
// significantly by Google Inc.
// Copyright 2006-2008 the V8 project authors. All rights reserved.
#ifndef V8_ARM_ASSEMBLER_THUMB2_INL_H_
#define V8_ARM_ASSEMBLER_THUMB2_INL_H_
#include "arm/assembler-thumb2.h"
#include "cpu.h"
namespace v8 {
namespace internal {
Condition NegateCondition(Condition cc) {
ASSERT(cc != al);
return static_cast<Condition>(cc ^ ne);
}
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p += delta; // relocate entry
}
// We do not use pc relative addressing on ARM, so there is
// nothing else to do.
}
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
return Assembler::target_address_at(pc_);
}
Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
}
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
}
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_at(Assembler::target_address_address_at(pc_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
}
Object** RelocInfo::target_object_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
}
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
}
Address* RelocInfo::target_reference_address() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
}
Address RelocInfo::call_address() {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::set_call_address(Address target) {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
}
Object* RelocInfo::call_object() {
return *call_object_address();
}
Object** RelocInfo::call_object_address() {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::set_call_object(Object* target) {
*call_object_address() = target;
}
bool RelocInfo::IsPatchedReturnSequence() {
// On ARM a "call instruction" is actually two instructions.
// mov lr, pc
// ldr pc, [pc, #XXX]
return (Assembler::instr_at(pc_) == kMovLrPc)
&& ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
== kLdrPCPattern);
}
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm32_ = immediate;
rmode_ = rmode;
}
Operand::Operand(const char* s) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(s);
rmode_ = RelocInfo::EMBEDDED_STRING;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Object** opp) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(opp);
rmode_ = RelocInfo::NONE;
}
Operand::Operand(Context** cpp) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(cpp);
rmode_ = RelocInfo::NONE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE;
}
Operand::Operand(Register rm) {
rm_ = rm;
rs_ = no_reg;
shift_op_ = LSL;
shift_imm_ = 0;
}
bool Operand::is_reg() const {
return rm_.is_valid() &&
rs_.is(no_reg) &&
shift_op_ == LSL &&
shift_imm_ == 0;
}
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
if (pc_offset() >= next_buffer_check_) {
CheckConstPool(false, true);
}
}
void Assembler::emit(Instr x) {
CheckBuffer();
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
}
Address Assembler::target_address_address_at(Address pc) {
Instr instr = Memory::int32_at(pc);
// Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
ASSERT((instr & 0x0f7f0000) == 0x051f0000);
int offset = instr & 0xfff; // offset_12 is unsigned
if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
// Verify that the constant pool comes after the instruction referencing it.
ASSERT(offset >= -4);
return pc + offset + 8;
}
Address Assembler::target_address_at(Address pc) {
return Memory::Address_at(target_address_address_at(pc));
}
void Assembler::set_target_at(Address constant_pool_entry,
Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
void Assembler::set_target_address_at(Address pc, Address target) {
Memory::Address_at(target_address_address_at(pc)) = target;
// Intuitively, we would think it is necessary to flush the instruction cache
// after patching a target address in the code as follows:
// CPU::FlushICache(pc, sizeof(target));
// However, on ARM, no instruction was actually patched by the assignment
// above; the target address is not part of an instruction, it is patched in
// the constant pool and is read via a data access; the instruction accessing
// this address in the constant pool remains unchanged.
}
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_THUMB2_INL_H_
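
The "+ 8" in target_address_address_at reflects the ARM pipeline: reading pc from an executing instruction yields that instruction's address plus 8. For example, a pc-relative ldr at address 0x1000 with offset_12 = 4 and the U bit set reads its constant-pool entry from 0x1000 + 4 + 8 = 0x100C; the assert offset >= -4 therefore guarantees the referenced entry lies at pc + 4 or later, i.e. after the instruction itself.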

deps/v8/src/arm/assembler-thumb2.cc (1821 lines changed)

File diff suppressed because it is too large

deps/v8/src/arm/assembler-thumb2.h (1027 lines changed)

File diff suppressed because it is too large

deps/v8/src/arm/codegen-arm.cc (92 lines changed)

@@ -1769,9 +1769,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
   primitive.Bind();
   frame_->EmitPush(r0);
-  Result arg_count(r0);
-  __ mov(r0, Operand(0));
-  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
+  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);

   jsobject.Bind();
   // Get the set of properties (as a FixedArray or Map).

@@ -1910,9 +1908,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
   __ ldr(r0, frame_->ElementAt(4));  // push enumerable
   frame_->EmitPush(r0);
   frame_->EmitPush(r3);  // push entry
-  Result arg_count_reg(r0);
-  __ mov(r0, Operand(1));
-  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2);
+  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
   __ mov(r3, Operand(r0));

   // If the property has been removed while iterating, we just skip it.

@@ -3660,9 +3656,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
     if (property != NULL) {
       LoadAndSpill(property->obj());
       LoadAndSpill(property->key());
-      Result arg_count(r0);
-      __ mov(r0, Operand(1));  // not counting receiver
-      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);

     } else if (variable != NULL) {
       Slot* slot = variable->slot();

@@ -3670,9 +3664,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
       LoadGlobal();
       __ mov(r0, Operand(variable->name()));
       frame_->EmitPush(r0);
-      Result arg_count(r0);
-      __ mov(r0, Operand(1));  // not counting receiver
-      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);

     } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
       // lookup the context holding the named variable

@@ -3684,9 +3676,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
       frame_->EmitPush(r0);
       __ mov(r0, Operand(variable->name()));
       frame_->EmitPush(r0);
-      Result arg_count(r0);
-      __ mov(r0, Operand(1));  // not counting receiver
-      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);

     } else {
       // Default: Result of deleting non-global, not dynamically

@@ -3736,9 +3726,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
       smi_label.Branch(eq);

       frame_->EmitPush(r0);
-      Result arg_count(r0);
-      __ mov(r0, Operand(0));  // not counting receiver
-      frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
+      frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, 1);

       continue_label.Jump();
       smi_label.Bind();

@@ -3760,9 +3748,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
       __ tst(r0, Operand(kSmiTagMask));
       continue_label.Branch(eq);
       frame_->EmitPush(r0);
-      Result arg_count(r0);
-      __ mov(r0, Operand(0));  // not counting receiver
-      frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+      frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
       continue_label.Bind();
       break;
     }

@@ -3847,9 +3833,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
   {
     // Convert the operand to a number.
     frame_->EmitPush(r0);
-    Result arg_count(r0);
-    __ mov(r0, Operand(0));
-    frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+    frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
   }
   if (is_postfix) {
     // Postfix: store to result (on the stack).

@@ -4235,9 +4219,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
     case Token::IN: {
       LoadAndSpill(left);
      LoadAndSpill(right);
-      Result arg_count(r0);
-      __ mov(r0, Operand(1));  // not counting receiver
-      frame_->InvokeBuiltin(Builtins::IN, CALL_JS, &arg_count, 2);
+      frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
       frame_->EmitPush(r0);
       break;
     }

@@ -5079,10 +5061,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
   if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     // ARMv7 VFP3 instructions to implement double precision comparison.
-    __ fmdrr(d6, r0, r1);
-    __ fmdrr(d7, r2, r3);
-    __ fcmp(d6, d7);
+    __ vmov(d6, r0, r1);
+    __ vmov(d7, r2, r3);
+    __ vcmp(d6, d7);
     __ vmrs(pc);
     __ mov(r0, Operand(0), LeaveCC, eq);
     __ mov(r0, Operand(1), LeaveCC, lt);

@@ -5145,7 +5127,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
-  __ mov(r0, Operand(arg_count));
   __ InvokeBuiltin(native, CALL_JS);
   __ cmp(r0, Operand(0));
   __ pop(pc);

@@ -5244,7 +5225,6 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
   // Only first argument is a string.
   __ bind(&string1);
-  __ mov(r0, Operand(2));  // Set number of arguments.
   __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);

   // First argument was not a string, test second.

@@ -5256,13 +5236,11 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
   // Only second argument is a string.

   __ b(&not_strings);
-  __ mov(r0, Operand(2));  // Set number of arguments.
   __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);

   __ bind(&not_strings);
   }

-  __ mov(r0, Operand(1));  // Set number of arguments.
   __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.

   // We branch here if at least one of r0 and r1 is not a Smi.

@@ -5353,22 +5331,22 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
     CpuFeatures::Scope scope(VFP3);
     // ARMv7 VFP3 instructions to implement
     // double precision, add, subtract, multiply, divide.
-    __ fmdrr(d6, r0, r1);
-    __ fmdrr(d7, r2, r3);
+    __ vmov(d6, r0, r1);
+    __ vmov(d7, r2, r3);
     if (Token::MUL == operation) {
-      __ fmuld(d5, d6, d7);
+      __ vmul(d5, d6, d7);
     } else if (Token::DIV == operation) {
-      __ fdivd(d5, d6, d7);
+      __ vdiv(d5, d6, d7);
     } else if (Token::ADD == operation) {
-      __ faddd(d5, d6, d7);
+      __ vadd(d5, d6, d7);
     } else if (Token::SUB == operation) {
-      __ fsubd(d5, d6, d7);
+      __ vsub(d5, d6, d7);
     } else {
       UNREACHABLE();
     }
-    __ fmrrd(r0, r1, d5);
+    __ vmov(r0, r1, d5);
     __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
     __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));

@@ -5457,9 +5435,9 @@ static void GetInt32(MacroAssembler* masm,
     // ARMv7 VFP3 instructions implementing double precision to integer
     // conversion using round to zero.
     __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
-    __ fmdrr(d7, scratch2, scratch);
-    __ ftosid(s15, d7);
-    __ fmrs(dest, s15);
+    __ vmov(d7, scratch2, scratch);
+    __ vcvt(s15, d7);
+    __ vmov(dest, s15);
   } else {
     // Get the top bits of the mantissa.
     __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));

@@ -5598,7 +5576,6 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
   __ bind(&slow);
   __ push(r1);  // restore stack
   __ push(r0);
-  __ mov(r0, Operand(1));  // 1 argument (not counting receiver).
   switch (op_) {
     case Token::BIT_OR:
       __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);

@@ -5703,6 +5680,29 @@ static void MultiplyByKnownInt2(
 }


+const char* GenericBinaryOpStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int len = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(len);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name;
+  switch (mode_) {
+    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+    default: overwrite_name = "UnknownOverwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, len),
+               "GenericBinaryOpStub_%s_%s%s",
+               op_name,
+               overwrite_name,
+               specialized_on_rhs_ ? "_ConstantRhs" : 0);
+  return name_;
+}
+
+
 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   // r1 : x
   // r0 : y

@@ -5980,7 +5980,6 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
   // Enter runtime system.
   __ bind(&slow);
   __ push(r0);
-  __ mov(r0, Operand(0));  // Set number of arguments.
   __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);

   __ bind(&not_smi);

@@ -6456,7 +6455,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   // Slow-case.  Tail call builtin.
   __ bind(&slow);
-  __ mov(r0, Operand(1));  // Arg count without receiver.
   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
 }
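
The new GetName builds the stub's debug name dynamically, so logs and profiles now distinguish overwrite mode and constant-RHS specialization: Token::ADD with NO_OVERWRITE and a specialized right operand yields, for example, "GenericBinaryOpStub_ADD_Alloc_ConstantRhs", where the old table-based GetName in codegen-arm.h (removed in the next file) could only return "GenericBinaryOpStub_ADD".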

deps/v8/src/arm/codegen-arm.h (21 lines changed)

@@ -455,13 +455,15 @@ class GenericBinaryOpStub : public CodeStub {
       : op_(op),
         mode_(mode),
         constant_rhs_(constant_rhs),
-        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
+        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
+        name_(NULL) { }

  private:
   Token::Value op_;
   OverwriteMode mode_;
   int constant_rhs_;
   bool specialized_on_rhs_;
+  char* name_;

   static const int kMaxKnownRhs = 0x40000000;

@@ -506,22 +508,7 @@ class GenericBinaryOpStub : public CodeStub {
     return key;
   }

-  const char* GetName() {
-    switch (op_) {
-      case Token::ADD: return "GenericBinaryOpStub_ADD";
-      case Token::SUB: return "GenericBinaryOpStub_SUB";
-      case Token::MUL: return "GenericBinaryOpStub_MUL";
-      case Token::DIV: return "GenericBinaryOpStub_DIV";
-      case Token::MOD: return "GenericBinaryOpStub_MOD";
-      case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
-      case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
-      case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
-      case Token::SAR: return "GenericBinaryOpStub_SAR";
-      case Token::SHL: return "GenericBinaryOpStub_SHL";
-      case Token::SHR: return "GenericBinaryOpStub_SHR";
-      default: return "GenericBinaryOpStub";
-    }
-  }
+  const char* GetName();

 #ifdef DEBUG
   void Print() {
21
deps/v8/src/arm/disasm-arm.cc

@ -897,15 +897,14 @@ void Decoder::DecodeUnconditional(Instr* instr) {
// void Decoder::DecodeTypeVFP(Instr* instr) // void Decoder::DecodeTypeVFP(Instr* instr)
// Implements the following VFP instructions: // vmov: Sn = Rt
// fmsr: Sn = Rt // vmov: Rt = Sn
// fmrs: Rt = Sn // vcvt: Dd = Sm
// fsitod: Dd = Sm // vcvt: Sd = Dm
// ftosid: Sd = Dm // Dd = vadd(Dn, Dm)
// Dd = faddd(Dn, Dm) // Dd = vsub(Dn, Dm)
// Dd = fsubd(Dn, Dm) // Dd = vmul(Dn, Dm)
// Dd = fmuld(Dn, Dm) // Dd = vdiv(Dn, Dm)
// Dd = fdivd(Dn, Dm)
// vcmp(Dd, Dm) // vcmp(Dd, Dm)
// VMRS // VMRS
void Decoder::DecodeTypeVFP(Instr* instr) { void Decoder::DecodeTypeVFP(Instr* instr) {
@ -997,8 +996,8 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
// Decode Type 6 coprocessor instructions. // Decode Type 6 coprocessor instructions.
// Dm = fmdrr(Rt, Rt2) // Dm = vmov(Rt, Rt2)
// <Rt, Rt2> = fmrrd(Dm) // <Rt, Rt2> = vmov(Dm)
void Decoder::DecodeType6CoprocessorIns(Instr* instr) { void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6)); ASSERT((instr->TypeField() == 6));

350
deps/v8/src/arm/fast-codegen-arm.cc

@@ -414,78 +414,98 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
   Variable* var = decl->proxy()->var();
   ASSERT(var != NULL);  // Must have been resolved.
   Slot* slot = var->slot();
-  ASSERT(slot != NULL);  // No global declarations here.
-
-  // We have 3 cases for slots: LOOKUP, LOCAL, CONTEXT.
-  switch (slot->type()) {
-    case Slot::LOOKUP: {
-      __ mov(r2, Operand(var->name()));
-      // Declaration nodes are always introduced in one of two modes.
-      ASSERT(decl->mode() == Variable::VAR || decl->mode() == Variable::CONST);
-      PropertyAttributes attr = decl->mode() == Variable::VAR ?
-          NONE : READ_ONLY;
-      __ mov(r1, Operand(Smi::FromInt(attr)));
-      // Push initial value, if any.
-      // Note: For variables we must not push an initial value (such as
-      // 'undefined') because we may have a (legal) redeclaration and we
-      // must not destroy the current value.
-      if (decl->mode() == Variable::CONST) {
-        __ mov(r0, Operand(Factory::the_hole_value()));
-        __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
-      } else if (decl->fun() != NULL) {
-        __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
-        Visit(decl->fun());  // Initial value for function decl.
-      } else {
-        __ mov(r0, Operand(Smi::FromInt(0)));  // No initial value!
-        __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
-      }
-      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
-      break;
-    }
-    case Slot::LOCAL:
-      if (decl->mode() == Variable::CONST) {
-        __ mov(r0, Operand(Factory::the_hole_value()));
-        __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
-      } else if (decl->fun() != NULL) {
-        Visit(decl->fun());
-        __ pop(r0);
-        __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
-      }
-      break;
-    case Slot::CONTEXT:
-      // The variable in the decl always resides in the current context.
-      ASSERT(function_->scope()->ContextChainLength(slot->var()->scope()) == 0);
-      if (decl->mode() == Variable::CONST) {
-        __ mov(r0, Operand(Factory::the_hole_value()));
-        if (FLAG_debug_code) {
-          // Check if we have the correct context pointer.
-          __ ldr(r1, CodeGenerator::ContextOperand(cp,
-                                                   Context::FCONTEXT_INDEX));
-          __ cmp(r1, cp);
-          __ Check(eq, "Unexpected declaration in current context.");
-        }
-        __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
-        // No write barrier since the_hole_value is in old space.
-        ASSERT(!Heap::InNewSpace(*Factory::the_hole_value()));
-      } else if (decl->fun() != NULL) {
-        Visit(decl->fun());
-        __ pop(r0);
-        if (FLAG_debug_code) {
-          // Check if we have the correct context pointer.
-          __ ldr(r1, CodeGenerator::ContextOperand(cp,
-                                                   Context::FCONTEXT_INDEX));
-          __ cmp(r1, cp);
-          __ Check(eq, "Unexpected declaration in current context.");
-        }
-        __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
-        int offset = Context::SlotOffset(slot->index());
-        __ mov(r2, Operand(offset));
-        // We know that we have written a function, which is not a smi.
-        __ RecordWrite(cp, r2, r0);
-      }
-      break;
-    default:
-      UNREACHABLE();
-  }
+  Property* prop = var->AsProperty();
+
+  if (slot != NULL) {
+    switch (slot->type()) {
+      case Slot::PARAMETER:  // Fall through.
+      case Slot::LOCAL:
+        if (decl->mode() == Variable::CONST) {
+          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+          __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+        } else if (decl->fun() != NULL) {
+          Visit(decl->fun());
+          __ pop(ip);
+          __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+        }
+        break;
+
+      case Slot::CONTEXT:
+        // The variable in the decl always resides in the current context.
+        ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+        if (FLAG_debug_code) {
+          // Check if we have the correct context pointer.
+          __ ldr(r1,
+                 CodeGenerator::ContextOperand(cp, Context::FCONTEXT_INDEX));
+          __ cmp(r1, cp);
+          __ Check(eq, "Unexpected declaration in current context.");
+        }
+        if (decl->mode() == Variable::CONST) {
+          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+          __ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
+          // No write barrier since the_hole_value is in old space.
+        } else if (decl->fun() != NULL) {
+          Visit(decl->fun());
+          __ pop(r0);
+          __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
+          int offset = Context::SlotOffset(slot->index());
+          __ mov(r2, Operand(offset));
+          // We know that we have written a function, which is not a smi.
+          __ RecordWrite(cp, r2, r0);
+        }
+        break;
+
+      case Slot::LOOKUP: {
+        __ mov(r2, Operand(var->name()));
+        // Declaration nodes are always introduced in one of two modes.
+        ASSERT(decl->mode() == Variable::VAR ||
+               decl->mode() == Variable::CONST);
+        PropertyAttributes attr =
+            (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+        __ mov(r1, Operand(Smi::FromInt(attr)));
+        // Push initial value, if any.
+        // Note: For variables we must not push an initial value (such as
+        // 'undefined') because we may have a (legal) redeclaration and we
+        // must not destroy the current value.
+        if (decl->mode() == Variable::CONST) {
+          __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+          __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+        } else if (decl->fun() != NULL) {
+          __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
+          Visit(decl->fun());  // Initial value for function decl.
+        } else {
+          __ mov(r0, Operand(Smi::FromInt(0)));  // No initial value!
+          __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+        }
+        __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+        break;
+      }
+    }
+
+  } else if (prop != NULL) {
+    if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+      // We are declaring a function or constant that rewrites to a
+      // property.  Use (keyed) IC to set the initial value.
+      ASSERT_EQ(Expression::kValue, prop->obj()->context());
+      Visit(prop->obj());
+      ASSERT_EQ(Expression::kValue, prop->key()->context());
+      Visit(prop->key());
+      if (decl->fun() != NULL) {
+        ASSERT_EQ(Expression::kValue, decl->fun()->context());
+        Visit(decl->fun());
+        __ pop(r0);
+      } else {
+        __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+      }
+
+      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+      __ Call(ic, RelocInfo::CODE_TARGET);
+
+      // Value in r0 is ignored (declarations are statements).  Receiver
+      // and key on stack are discarded.
+      __ add(sp, sp, Operand(2 * kPointerSize));
+    }
+  }
 }
@@ -501,21 +521,6 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
 }

-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
-  Comment cmnt(masm_, "[ ReturnStatement");
-  Expression* expr = stmt->expression();
-  // Complete the statement based on the type of the subexpression.
-  if (expr->AsLiteral() != NULL) {
-    __ mov(r0, Operand(expr->AsLiteral()->handle()));
-  } else {
-    ASSERT_EQ(Expression::kValue, expr->context());
-    Visit(expr);
-    __ pop(r0);
-  }
-  EmitReturnSequence(stmt->statement_pos());
-}

 void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   Comment cmnt(masm_, "[ FunctionLiteral");
@@ -536,18 +541,24 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {

 void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
-  Expression* rewrite = expr->var()->rewrite();
+  EmitVariableLoad(expr->var(), expr->context());
+}
+
+
+void FastCodeGenerator::EmitVariableLoad(Variable* var,
+                                         Expression::Context context) {
+  Expression* rewrite = var->rewrite();
   if (rewrite == NULL) {
-    ASSERT(expr->var()->is_global());
+    ASSERT(var->is_global());
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in r2 and the global
     // object on the stack.
     __ ldr(ip, CodeGenerator::GlobalObject());
     __ push(ip);
-    __ mov(r2, Operand(expr->name()));
+    __ mov(r2, Operand(var->name()));
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-    DropAndMove(expr->context(), r0);
+    DropAndMove(context, r0);
   } else if (rewrite->AsSlot() != NULL) {
     Slot* slot = rewrite->AsSlot();
     if (FLAG_debug_code) {
@@ -568,7 +579,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
         UNREACHABLE();
     }
-    Move(expr->context(), slot, r0);
+    Move(context, slot, r0);
   } else {
     // A variable has been rewritten into an explicit access to
     // an object property.
@@ -603,7 +614,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
     __ Call(ic, RelocInfo::CODE_TARGET);
     // Drop key and object left on the stack by IC, and push the result.
-    DropAndMove(expr->context(), r0, 2);
+    DropAndMove(context, r0, 2);
   }
 }
@@ -637,32 +648,15 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {

 void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
-  Label boilerplate_exists;
   __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  // r2 = literal array (0).
   __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
-  int literal_offset =
-      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
-  __ ldr(r0, FieldMemOperand(r2, literal_offset));
-  // Check whether we need to materialize the object literal boilerplate.
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(r0, Operand(ip));
-  __ b(ne, &boilerplate_exists);
-  // Create boilerplate if it does not exist.
-  // r1 = literal index (1).
   __ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
-  // r0 = constant properties (2).
   __ mov(r0, Operand(expr->constant_properties()));
   __ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
-  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
-  __ bind(&boilerplate_exists);
-  // r0 contains boilerplate.
-  // Clone boilerplate.
-  __ push(r0);
   if (expr->depth() > 1) {
-    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
   } else {
-    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
   }

   // If result_saved == true: The result is saved on top of the
@@ -763,32 +757,15 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {

 void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
-  Label make_clone;
-  // Fetch the function's literals array.
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
-  // Check if the literal's boilerplate has been instantiated.
-  int offset =
-      FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
-  __ ldr(r0, FieldMemOperand(r3, offset));
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(r0, ip);
-  __ b(&make_clone, ne);
-  // Instantiate the boilerplate.
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
   __ mov(r1, Operand(expr->literals()));
   __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
-  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
-  __ bind(&make_clone);
-  // Clone the boilerplate.
-  __ push(r0);
   if (expr->depth() > 1) {
-    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else {
-    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+    __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   }

   bool result_saved = false;  // Is the result saved to the stack?
@@ -860,10 +837,38 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
 }

+void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
+                                              Expression::Context context) {
+  Literal* key = prop->key()->AsLiteral();
+  __ mov(r2, Operand(key->handle()));
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  Move(context, r0);
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  Move(context, r0);
+}
+
+
+void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
+                                                 Expression::Context context) {
+  __ pop(r0);
+  __ pop(r1);
+  GenericBinaryOpStub stub(op,
+                           NO_OVERWRITE);
+  __ CallStub(&stub);
+  Move(context, r0);
+}
+
 void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
   Variable* var = expr->target()->AsVariableProxy()->AsVariable();
   ASSERT(var != NULL);
+  ASSERT(var->is_global() || var->slot() != NULL);
   if (var->is_global()) {
     // Assignment to a global variable.  Use inline caching for the
     // assignment.  Right-hand-side value is passed in r0, variable name in
@@ -976,35 +981,6 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
         UNREACHABLE();
         break;
     }
-  } else {
-    Property* property = var->rewrite()->AsProperty();
-    ASSERT_NOT_NULL(property);
-    // Load object and key onto the stack.
-    Slot* object_slot = property->obj()->AsSlot();
-    ASSERT_NOT_NULL(object_slot);
-    Move(Expression::kValue, object_slot, r0);
-    Literal* key_literal = property->key()->AsLiteral();
-    ASSERT_NOT_NULL(key_literal);
-    Move(Expression::kValue, key_literal);
-    // Value to store was pushed before object and key on the stack.
-    __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
-    // Arguments to ic is value in r0, object and key on stack.
-    Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    if (expr->context() == Expression::kEffect) {
-      __ add(sp, sp, Operand(3 * kPointerSize));
-    } else if (expr->context() == Expression::kValue) {
-      // Value is still on the stack in esp[2 * kPointerSize]
-      __ add(sp, sp, Operand(2 * kPointerSize));
-    } else {
-      __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
-      DropAndMove(expr->context(), r0, 3);
-    }
   }
 }
@@ -1104,7 +1080,9 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
   DropAndMove(expr->context(), r0);
 }

-void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
+void FastCodeGenerator::EmitCallWithIC(Call* expr,
+                                       Handle<Object> ignored,
+                                       RelocInfo::Mode mode) {
   // Code common for calls using the IC.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -1117,7 +1095,7 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
   // Call the IC initialization code.
   Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
                                                          NOT_IN_LOOP);
-  __ Call(ic, reloc_info);
+  __ Call(ic, mode);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   // Discard the function left on TOS.
@@ -1157,7 +1135,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
     // Push global object as receiver for the call IC lookup.
     __ ldr(r0, CodeGenerator::GlobalObject());
     __ stm(db_w, sp, r1.bit() | r0.bit());
-    EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+    EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
   } else if (var != NULL && var->slot() != NULL &&
              var->slot()->type() == Slot::LOOKUP) {
     // Call to a lookup slot.
@@ -1171,7 +1149,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
     __ mov(r0, Operand(key->handle()));
     __ push(r0);
     Visit(prop->obj());
-    EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+    EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
   } else {
     // Call to a keyed property, use keyed load IC followed by function
     // call.
@@ -1706,7 +1684,63 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
 }

+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+  __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  Move(expr->context(), r0);
+}
+
+
+Register FastCodeGenerator::result_register() { return r0; }
+
+
+Register FastCodeGenerator::context_register() { return cp; }
+
+
+void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+  ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  __ str(value, MemOperand(fp, frame_offset));
+}
+
+
+void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+  __ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FastCodeGenerator::EnterFinallyBlock() {
+  ASSERT(!result_register().is(r1));
+  // Store result register while executing finally block.
+  __ push(result_register());
+  // Cook return address in link register to stack (smi encoded Code* delta)
+  __ sub(r1, lr, Operand(masm_->CodeObject()));
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  ASSERT_EQ(0, kSmiTag);
+  __ add(r1, r1, Operand(r1));  // Convert to smi.
+  __ push(r1);
+}
+
+
+void FastCodeGenerator::ExitFinallyBlock() {
+  ASSERT(!result_register().is(r1));
+  // Restore result register from stack.
+  __ pop(r1);
+  // Uncook return address and return.
+  __ pop(result_register());
+  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  __ mov(r1, Operand(r1, ASR, 1));  // Un-smi-tag value.
+  __ add(pc, r1, Operand(masm_->CodeObject()));
+}
+
+
+void FastCodeGenerator::ThrowException() {
+  __ push(result_register());
+  __ CallRuntime(Runtime::kThrow, 1);
+}
+
 #undef __

 } }  // namespace v8::internal
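
The cooking arithmetic relies on V8's smi encoding on 32-bit targets: with kSmiTagSize + kSmiShiftSize == 1 and a zero tag, adding a value to itself equals shifting left by one, which turns the code-relative return-address delta into a valid smi the garbage collector will not mistake for a raw pointer. A sketch of the round trip in plain C++ (variable names are illustrative):

    // Cook: turn a raw return address into a GC-safe smi-encoded delta.
    intptr_t delta = return_address - code_object_start;  // __ sub(r1, lr, ...)
    intptr_t cooked = delta + delta;                      // == delta << 1, tag bit 0

    // Uncook: recover the address when leaving the finally block.
    intptr_t uncooked = cooked >> 1;                      // ASR by kSmiTagSize
    intptr_t resumed = code_object_start + uncooked;      // __ add(pc, r1, ...)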

4
deps/v8/src/arm/frames-arm.cc

@@ -28,7 +28,11 @@
 #include "v8.h"

 #include "frames-inl.h"
+#ifdef V8_ARM_VARIANT_THUMB
+#include "arm/assembler-thumb2-inl.h"
+#else
 #include "arm/assembler-arm-inl.h"
+#endif

 namespace v8 {

10
deps/v8/src/arm/ic-arm.cc

@@ -276,7 +276,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   // Cache miss: Jump to runtime.
   __ bind(&miss);
-  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+  GenerateMiss(masm, argc);
 }

@@ -371,13 +371,11 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
   // Cache miss: Jump to runtime.
   __ bind(&miss);
-  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+  GenerateMiss(masm, argc);
 }

-void CallIC::Generate(MacroAssembler* masm,
-                      int argc,
-                      const ExternalReference& f) {
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- lr: return address
   // -----------------------------------
@@ -394,7 +392,7 @@ void CallIC::Generate(MacroAssembler* masm,
   // Call the entry.
   __ mov(r0, Operand(2));
-  __ mov(r1, Operand(f));
+  __ mov(r1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
   CEntryStub stub(1);
   __ CallStub(&stub);

30
deps/v8/src/arm/macro-assembler-arm.cc

@@ -162,6 +162,21 @@ void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
 }

+void MacroAssembler::Drop(int stack_elements, Condition cond) {
+  if (stack_elements > 0) {
+    add(sp, sp, Operand(stack_elements * kPointerSize), LeaveCC, cond);
+  }
+}
+
+
+void MacroAssembler::Call(Label* target) {
+  bl(target);
+}
+
+
+void MacroAssembler::Move(Register dst, Handle<Object> value) {
+  mov(dst, Operand(value));
+}
+
 void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {

@@ -628,6 +643,15 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
 }

+void MacroAssembler::PopTryHandler() {
+  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+  pop(r1);
+  mov(ip, Operand(ExternalReference(Top::k_handler_address)));
+  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+  str(r1, MemOperand(ip));
+}
+
 Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
                                    JSObject* holder, Register holder_reg,
                                    Register scratch,
@@ -994,9 +1018,9 @@ void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
                                                        Register outLowReg) {
   // ARMv7 VFP3 instructions to implement integer to double conversion.
   mov(r7, Operand(inReg, ASR, kSmiTagSize));
-  fmsr(s15, r7);
-  fsitod(d7, s15);
-  fmrrd(outLowReg, outHighReg, d7);
+  vmov(s15, r7);
+  vcvt(d7, s15);
+  vmov(outLowReg, outHighReg, d7);
 }
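
Together, these helpers let generated code unwind the stack without disturbing the result value. A hedged sketch of a call site, mirroring how the new fast code generator exits nested statements (the element count is illustrative):

    // Discard two stack slots, then unlink the topmost try handler.
    __ Drop(2);          // add sp, sp, #(2 * kPointerSize)
    __ PopTryHandler();  // pops the next-handler link back into
                         // Top::k_handler_address, preserving r0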

6
deps/v8/src/arm/macro-assembler-arm.h

@@ -64,6 +64,9 @@ class MacroAssembler: public Assembler {
   void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
   void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
   void Ret(Condition cond = al);
+  void Drop(int stack_elements, Condition cond = al);
+  void Call(Label* target);
+  void Move(Register dst, Handle<Object> value);
   // Jumps to the label at the index given by the Smi in "index".
   void SmiJumpTable(Register index, Vector<Label*> targets);
   // Load an object from the root table.
@@ -148,6 +151,9 @@ class MacroAssembler: public Assembler {
   // On exit, r0 contains TOS (code slot).
   void PushTryHandler(CodeLocation try_location, HandlerType type);
+  // Unlink the stack handler on top of the stack from the try handler chain.
+  // Must preserve the result register.
+  void PopTryHandler();

   // ---------------------------------------------------------------------------
   // Inline caching support

20
deps/v8/src/arm/simulator-arm.cc

@@ -1893,14 +1893,14 @@ void Simulator::DecodeUnconditional(Instr* instr) {

 // void Simulator::DecodeTypeVFP(Instr* instr)
 // The Following ARMv7 VFPv instructions are currently supported.
-// fmsr :Sn = Rt
-// fmrs :Rt = Sn
-// fsitod: Dd = Sm
-// ftosid: Sd = Dm
-// Dd = faddd(Dn, Dm)
-// Dd = fsubd(Dn, Dm)
-// Dd = fmuld(Dn, Dm)
-// Dd = fdivd(Dn, Dm)
+// vmov :Sn = Rt
+// vmov :Rt = Sn
+// vcvt: Dd = Sm
+// vcvt: Sd = Dm
+// Dd = vadd(Dn, Dm)
+// Dd = vsub(Dn, Dm)
+// Dd = vmul(Dn, Dm)
+// Dd = vdiv(Dn, Dm)
 // vcmp(Dd, Dm)
 // VMRS
 void Simulator::DecodeTypeVFP(Instr* instr) {
@@ -2020,8 +2020,8 @@ void Simulator::DecodeTypeVFP(Instr* instr) {

 // void Simulator::DecodeType6CoprocessorIns(Instr* instr)
 // Decode Type 6 coprocessor instructions.
-// Dm = fmdrr(Rt, Rt2)
-// <Rt, Rt2> = fmrrd(Dm)
+// Dm = vmov(Rt, Rt2)
+// <Rt, Rt2> = vmov(Dm)
 void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
   ASSERT((instr->TypeField() == 6));

47
deps/v8/src/arm/stub-cache-arm.cc

@@ -446,7 +446,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
 }

-void StubCompiler::GenerateLoadCallback(JSObject* object,
+bool StubCompiler::GenerateLoadCallback(JSObject* object,
                                         JSObject* holder,
                                         Register receiver,
                                         Register name_reg,
@@ -454,7 +454,8 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
                                         Register scratch2,
                                         AccessorInfo* callback,
                                         String* name,
-                                        Label* miss) {
+                                        Label* miss,
+                                        Failure** failure) {
   // Check that the receiver isn't a smi.
   __ tst(receiver, Operand(kSmiTagMask));
   __ b(eq, miss);
@@ -476,6 +477,8 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
   ExternalReference load_callback_property =
       ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
   __ TailCallRuntime(load_callback_property, 5, 1);
+
+  return true;
 }

@@ -774,8 +777,26 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
   __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));

   // Check that the cell contains the same function.
-  __ cmp(r1, Operand(Handle<JSFunction>(function)));
-  __ b(ne, &miss);
+  if (Heap::InNewSpace(function)) {
+    // We can't embed a pointer to a function in new space so we have
+    // to verify that the shared function info is unchanged. This has
+    // the nice side effect that multiple closures based on the same
+    // function can all use this call IC. Before we load through the
+    // function, we have to verify that it still is a function.
+    __ tst(r1, Operand(kSmiTagMask));
+    __ b(eq, &miss);
+    __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+    __ b(ne, &miss);
+
+    // Check the shared function info. Make sure it hasn't changed.
+    __ mov(r3, Operand(Handle<SharedFunctionInfo>(function->shared())));
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+    __ cmp(r2, r3);
+    __ b(ne, &miss);
+  } else {
+    __ cmp(r1, Operand(Handle<JSFunction>(function)));
+    __ b(ne, &miss);
+  }

   // Patch the receiver on the stack with the global proxy if
   // necessary.
@@ -1003,10 +1024,10 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
 }

-Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+Object* LoadStubCompiler::CompileLoadCallback(String* name,
+                                              JSObject* object,
                                               JSObject* holder,
-                                              AccessorInfo* callback,
-                                              String* name) {
+                                              AccessorInfo* callback) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -1015,7 +1036,11 @@ Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
   Label miss;

   __ ldr(r0, MemOperand(sp, 0));
-  GenerateLoadCallback(object, holder, r0, r2, r3, r1, callback, name, &miss);
+  Failure* failure = Failure::InternalError();
+  bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1,
+                                      callback, name, &miss, &failure);
+  if (!success) return failure;

   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1168,7 +1193,11 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
   __ cmp(r2, Operand(Handle<String>(name)));
   __ b(ne, &miss);

-  GenerateLoadCallback(receiver, holder, r0, r2, r3, r1, callback, name, &miss);
+  Failure* failure = Failure::InternalError();
+  bool success = GenerateLoadCallback(receiver, holder, r0, r2, r3, r1,
+                                      callback, name, &miss, &failure);
+  if (!success) return failure;

   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

3
deps/v8/src/arm/virtual-frame-arm.cc

@@ -243,11 +243,8 @@ void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {

 void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
                                  InvokeJSFlags flags,
-                                 Result* arg_count_register,
                                  int arg_count) {
-  ASSERT(arg_count_register->reg().is(r0));
   PrepareForCall(arg_count, arg_count);
-  arg_count_register->Unuse();
   __ InvokeBuiltin(id, flags);
 }

1
deps/v8/src/arm/virtual-frame-arm.h

@@ -305,7 +305,6 @@ class VirtualFrame : public ZoneObject {
   // removes from) the stack.
   void InvokeBuiltin(Builtins::JavaScript id,
                      InvokeJSFlags flag,
-                     Result* arg_count_register,
                      int arg_count);

   // Call into an IC stub given the number of arguments it removes

10
deps/v8/src/assembler.cc

@@ -573,6 +573,16 @@ ExternalReference ExternalReference::random_positive_smi_function() {
 }

+ExternalReference ExternalReference::keyed_lookup_cache_keys() {
+  return ExternalReference(KeyedLookupCache::keys_address());
+}
+
+
+ExternalReference ExternalReference::keyed_lookup_cache_field_offsets() {
+  return ExternalReference(KeyedLookupCache::field_offsets_address());
+}
+
 ExternalReference ExternalReference::the_hole_value_location() {
   return ExternalReference(Factory::the_hole_value().location());
 }

4
deps/v8/src/assembler.h

@@ -401,6 +401,10 @@ class ExternalReference BASE_EMBEDDED {
   static ExternalReference builtin_passed_function();
   static ExternalReference random_positive_smi_function();

+  // Static data in the keyed lookup cache.
+  static ExternalReference keyed_lookup_cache_keys();
+  static ExternalReference keyed_lookup_cache_field_offsets();
+
   // Static variable Factory::the_hole_value.location()
   static ExternalReference the_hole_value_location();
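
These references let generated stubs probe the keyed lookup cache tables directly instead of going through the runtime on every keyed load. A minimal sketch of how a stub might form the table addresses on ARM (the register choices are illustrative, not from this commit):

    ExternalReference cache_keys = ExternalReference::keyed_lookup_cache_keys();
    __ mov(r4, Operand(cache_keys));     // base of the (map, name) keys table
    ExternalReference cache_offsets =
        ExternalReference::keyed_lookup_cache_field_offsets();
    __ mov(r5, Operand(cache_offsets));  // base of the field-offset table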

17
deps/v8/src/ast.h

@@ -139,6 +139,7 @@ class AstNode: public ZoneObject {
   virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
   virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
   virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
+  virtual CompareOperation* AsCompareOperation() { return NULL; }
 };

@@ -192,13 +193,13 @@ class Expression: public AstNode {
   virtual void MarkAsStatement() { /* do nothing */ }

   // Static type information for this expression.
-  SmiAnalysis* type() { return &type_; }
+  StaticType* type() { return &type_; }

   Context context() { return context_; }
   void set_context(Context context) { context_ = context; }

  private:
-  SmiAnalysis type_;
+  StaticType type_;
   Context context_;
 };

@@ -1185,7 +1186,7 @@ class CountOperation: public Expression {
 class CompareOperation: public Expression {
  public:
   CompareOperation(Token::Value op, Expression* left, Expression* right)
-      : op_(op), left_(left), right_(right) {
+      : op_(op), left_(left), right_(right), is_for_loop_condition_(false) {
     ASSERT(Token::IsCompareOp(op));
   }

@@ -1195,10 +1196,18 @@ class CompareOperation: public Expression {
   Expression* left() const { return left_; }
   Expression* right() const { return right_; }

+  // Accessors for flag whether this compare operation is hanging of a for loop.
+  bool is_for_loop_condition() const { return is_for_loop_condition_; }
+  void set_is_for_loop_condition() { is_for_loop_condition_ = true; }
+
+  // Type testing & conversion
+  virtual CompareOperation* AsCompareOperation() { return this; }
+
  private:
   Token::Value op_;
   Expression* left_;
   Expression* right_;
+  bool is_for_loop_condition_;
 };

@@ -1241,6 +1250,8 @@ class Assignment: public Expression {
   Expression* target() const { return target_; }
   Expression* value() const { return value_; }
   int position() { return pos_; }
+  // This check relies on the definition order of token in token.h.
+  bool is_compound() const { return op() > Token::ASSIGN; }

   // An initialization block is a series of statments of the form
   // x.y.z.a = ...; x.y.z.b = ...; etc. The parser marks the beginning and
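
A sketch of how clients such as the parser or a code generator might use the two new predicates (the call sites below are illustrative, not from this commit):

    // Mark a for-loop condition so code generation can specialize it.
    CompareOperation* cmp = stmt->cond()->AsCompareOperation();
    if (cmp != NULL) cmp->set_is_for_loop_condition();

    // Distinguish 'x += 1' (compound) from 'x = 1' (simple).  The check
    // works because compound-assignment tokens follow ASSIGN in token.h.
    if (expr->is_compound()) {
      // load the old value, apply the binary op, then store
    }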

26
deps/v8/src/bootstrapper.cc

@@ -95,6 +95,8 @@ static SourceCodeCache natives_cache(Script::TYPE_NATIVE);
 static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
 // This is for delete, not delete[].
 static List<char*>* delete_these_non_arrays_on_tear_down = NULL;
+// This is for delete[]
+static List<char*>* delete_these_arrays_on_tear_down = NULL;

 NativesExternalStringResource::NativesExternalStringResource(const char* source)
@@ -150,17 +152,41 @@ void Bootstrapper::Initialize(bool create_heap_objects) {
 }

+char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
+  char* memory = new char[bytes];
+  if (memory != NULL) {
+    if (delete_these_arrays_on_tear_down == NULL) {
+      delete_these_arrays_on_tear_down = new List<char*>(2);
+    }
+    delete_these_arrays_on_tear_down->Add(memory);
+  }
+  return memory;
+}
+
+
 void Bootstrapper::TearDown() {
   if (delete_these_non_arrays_on_tear_down != NULL) {
     int len = delete_these_non_arrays_on_tear_down->length();
     ASSERT(len < 20);  // Don't use this mechanism for unbounded allocations.
     for (int i = 0; i < len; i++) {
       delete delete_these_non_arrays_on_tear_down->at(i);
+      delete_these_non_arrays_on_tear_down->at(i) = NULL;
     }
     delete delete_these_non_arrays_on_tear_down;
     delete_these_non_arrays_on_tear_down = NULL;
   }

+  if (delete_these_arrays_on_tear_down != NULL) {
+    int len = delete_these_arrays_on_tear_down->length();
+    ASSERT(len < 1000);  // Don't use this mechanism for unbounded allocations.
+    for (int i = 0; i < len; i++) {
+      delete[] delete_these_arrays_on_tear_down->at(i);
+      delete_these_arrays_on_tear_down->at(i) = NULL;
+    }
+    delete delete_these_arrays_on_tear_down;
+    delete_these_arrays_on_tear_down = NULL;
+  }
+
   natives_cache.Initialize(false);  // Yes, symmetrical
   extensions_cache.Initialize(false);
 }

4
deps/v8/src/bootstrapper.h

@@ -74,6 +74,10 @@ class Bootstrapper : public AllStatic {
   static char* ArchiveState(char* to);
   static char* RestoreState(char* from);
   static void FreeThreadResources();
+
+  // This will allocate a char array that is deleted when V8 is shut down.
+  // It should only be used for strictly finite allocations.
+  static char* AllocateAutoDeletedArray(int bytes);
 };
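
A sketch of the intended usage pattern: a small, bounded buffer whose ownership passes to the bootstrapper, so the caller never frees it (the size below is illustrative):

    char* buffer = Bootstrapper::AllocateAutoDeletedArray(100);
    if (buffer != NULL) {
      // ... fill the buffer; it is delete[]'d in Bootstrapper::TearDown() ...
    }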

143
deps/v8/src/code-stubs.cc

@@ -35,82 +35,117 @@
 namespace v8 {
 namespace internal {

-Handle<Code> CodeStub::GetCode() {
-  bool custom_cache = has_custom_cache();
-
-  int index = 0;
-  uint32_t key = 0;
-  if (custom_cache) {
-    Code* cached;
-    if (GetCustomCache(&cached)) {
-      return Handle<Code>(cached);
-    } else {
-      index = NumberDictionary::kNotFound;
-    }
-  } else {
-    key = GetKey();
-    index = Heap::code_stubs()->FindEntry(key);
-    if (index != NumberDictionary::kNotFound)
-      return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
-  }
-
-  Code* result;
-  {
-    v8::HandleScope scope;
-
-    // Update the static counter each time a new code stub is generated.
-    Counters::code_stubs.Increment();
-
-    // Generate the new code.
-    MacroAssembler masm(NULL, 256);
-
-    // Nested stubs are not allowed for leafs.
-    masm.set_allow_stub_calls(AllowsStubCalls());
-
-    // Generate the code for the stub.
-    masm.set_generating_stub(true);
-    Generate(&masm);
-
-    // Create the code object.
-    CodeDesc desc;
-    masm.GetCode(&desc);
-
-    // Copy the generated code into a heap object, and store the major key.
-    Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
-    Handle<Code> code = Factory::NewCode(desc, NULL, flags, masm.CodeObject());
-    code->set_major_key(MajorKey());
-
-    // Add unresolved entries in the code to the fixup list.
-    Bootstrapper::AddFixup(*code, &masm);
-
-    LOG(CodeCreateEvent(Logger::STUB_TAG, *code, GetName()));
-    Counters::total_stubs_code_size.Increment(code->instruction_size());
-
-#ifdef ENABLE_DISASSEMBLER
-    if (FLAG_print_code_stubs) {
-#ifdef DEBUG
-      Print();
-#endif
-      code->Disassemble(GetName());
-      PrintF("\n");
-    }
-#endif
-
-    if (custom_cache) {
-      SetCustomCache(*code);
-    } else {
-      // Update the dictionary and the root in Heap.
-      Handle<NumberDictionary> dict =
-          Factory::DictionaryAtNumberPut(
-              Handle<NumberDictionary>(Heap::code_stubs()),
-              key,
-              code);
-      Heap::public_set_code_stubs(*dict);
-    }
-    result = *code;
-  }
-
-  return Handle<Code>(result);
-}
+bool CodeStub::FindCodeInCache(Code** code_out) {
+  if (has_custom_cache()) return GetCustomCache(code_out);
+  int index = Heap::code_stubs()->FindEntry(GetKey());
+  if (index != NumberDictionary::kNotFound) {
+    *code_out = Code::cast(Heap::code_stubs()->ValueAt(index));
+    return true;
+  }
+  return false;
+}
+
+
+void CodeStub::GenerateCode(MacroAssembler* masm) {
+  // Update the static counter each time a new code stub is generated.
+  Counters::code_stubs.Increment();
+  // Nested stubs are not allowed for leafs.
+  masm->set_allow_stub_calls(AllowsStubCalls());
+  // Generate the code for the stub.
+  masm->set_generating_stub(true);
+  Generate(masm);
+}
+
+
+void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
+  code->set_major_key(MajorKey());
+
+  // Add unresolved entries in the code to the fixup list.
+  Bootstrapper::AddFixup(code, masm);
+
+  LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
+  Counters::total_stubs_code_size.Increment(code->instruction_size());
+
+#ifdef ENABLE_DISASSEMBLER
+  if (FLAG_print_code_stubs) {
+#ifdef DEBUG
+    Print();
+#endif
+    code->Disassemble(GetName());
+    PrintF("\n");
+  }
+#endif
+}
+
+
+Handle<Code> CodeStub::GetCode() {
+  Code* code;
+  if (!FindCodeInCache(&code)) {
+    v8::HandleScope scope;
+
+    // Generate the new code.
+    MacroAssembler masm(NULL, 256);
+    GenerateCode(&masm);
+
+    // Create the code object.
+    CodeDesc desc;
+    masm.GetCode(&desc);
+
+    // Copy the generated code into a heap object.
+    Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
+    Handle<Code> new_object =
+        Factory::NewCode(desc, NULL, flags, masm.CodeObject());
+    RecordCodeGeneration(*new_object, &masm);
+
+    if (has_custom_cache()) {
+      SetCustomCache(*new_object);
+    } else {
+      // Update the dictionary and the root in Heap.
+      Handle<NumberDictionary> dict =
+          Factory::DictionaryAtNumberPut(
+              Handle<NumberDictionary>(Heap::code_stubs()),
+              GetKey(),
+              new_object);
+      Heap::public_set_code_stubs(*dict);
+    }
+    code = *new_object;
+  }
+
+  return Handle<Code>(code);
+}
+
+
+Object* CodeStub::TryGetCode() {
+  Code* code;
+  if (!FindCodeInCache(&code)) {
+    // Generate the new code.
+    MacroAssembler masm(NULL, 256);
+    GenerateCode(&masm);
+
+    // Create the code object.
+    CodeDesc desc;
+    masm.GetCode(&desc);
+
+    // Try to copy the generated code into a heap object.
+    Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
+    Object* new_object =
+        Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
+    if (new_object->IsFailure()) return new_object;
+    code = Code::cast(new_object);
+    RecordCodeGeneration(code, &masm);
+
+    if (has_custom_cache()) {
+      SetCustomCache(code);
+    } else {
+      // Try to update the code cache but do not fail if unable.
+      new_object = Heap::code_stubs()->AtNumberPut(GetKey(), code);
+      if (!new_object->IsFailure()) {
+        Heap::public_set_code_stubs(NumberDictionary::cast(new_object));
+      }
+    }
+  }
+
+  return code;
+}
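
The split matters for callers that must not force allocation: GetCode allocates through the Factory and may trigger a GC, while TryGetCode hands back a retry-after-GC failure for the caller to propagate. A hedged sketch of a call site, using a stub whose constructor appears later in this commit (the slot count is illustrative):

    FastNewContextStub stub(4);
    Object* code = stub.TryGetCode();
    if (code->IsFailure()) return code;  // propagate the retry-after-GC failure
    // Otherwise Code::cast(code) is usable without any handle allocation.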

19
deps/v8/src/code-stubs.h

@@ -43,6 +43,9 @@ namespace internal {
   V(ConvertToDouble)                     \
   V(WriteInt32ToHeapNumber)              \
   V(StackCheck)                          \
+  V(FastNewClosure)                      \
+  V(FastNewContext)                      \
+  V(FastCloneShallowArray)               \
   V(UnarySub)                            \
   V(RevertToNumber)                      \
   V(ToBoolean)                           \
@@ -83,6 +86,11 @@ class CodeStub BASE_EMBEDDED {
   // Retrieve the code for the stub. Generate the code if needed.
   Handle<Code> GetCode();

+  // Retrieve the code for the stub if already generated.  Do not
+  // generate the code if not already generated and instead return a
+  // retry after GC Failure object.
+  Object* TryGetCode();
+
   static Major MajorKeyFromKey(uint32_t key) {
     return static_cast<Major>(MajorKeyBits::decode(key));
   };
@@ -104,9 +112,20 @@ class CodeStub BASE_EMBEDDED {
   static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;

  private:
+  // Lookup the code in the (possibly custom) cache.
+  bool FindCodeInCache(Code** code_out);
+
+  // Nonvirtual wrapper around the stub-specific Generate function.  Call
+  // this function to set up the macro assembler and generate the code.
+  void GenerateCode(MacroAssembler* masm);
+
   // Generates the assembler code for the stub.
   virtual void Generate(MacroAssembler* masm) = 0;

+  // Perform bookkeeping required after code generation when stub code is
+  // initially generated.
+  void RecordCodeGeneration(Code* code, MacroAssembler* masm);
+
   // Returns information for computing the number key.
   virtual Major MajorKey() = 0;
   virtual int MinorKey() = 0;

49
deps/v8/src/codegen.h

@@ -233,6 +233,55 @@ class StackCheckStub : public CodeStub {
 };

+class FastNewClosureStub : public CodeStub {
+ public:
+  void Generate(MacroAssembler* masm);
+
+ private:
+  const char* GetName() { return "FastNewClosureStub"; }
+  Major MajorKey() { return FastNewClosure; }
+  int MinorKey() { return 0; }
+};
+
+
+class FastNewContextStub : public CodeStub {
+ public:
+  static const int kMaximumSlots = 64;
+
+  explicit FastNewContextStub(int slots) : slots_(slots) {
+    ASSERT(slots_ > 0 && slots <= kMaximumSlots);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  int slots_;
+
+  const char* GetName() { return "FastNewContextStub"; }
+  Major MajorKey() { return FastNewContext; }
+  int MinorKey() { return slots_; }
+};
+
+
+class FastCloneShallowArrayStub : public CodeStub {
+ public:
+  static const int kMaximumLength = 8;
+
+  explicit FastCloneShallowArrayStub(int length) : length_(length) {
+    ASSERT(length >= 0 && length <= kMaximumLength);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  int length_;
+
+  const char* GetName() { return "FastCloneShallowArrayStub"; }
+  Major MajorKey() { return FastCloneShallowArray; }
+  int MinorKey() { return length_; }
+};
+
+
 class InstanceofStub: public CodeStub {
  public:
   InstanceofStub() { }
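
These stubs are meant to be called from generated code in place of a full runtime call when the context or literal is small enough; each stub's MinorKey is the size, so each size gets its own cached code object. A sketch of a guarded call site, with the runtime fallback as an assumption about how such stubs are typically paired (the exact call site is illustrative):

    if (slots > 0 && slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(slots);
      __ CallStub(&stub);       // fast path: allocate the context inline
    } else {
      __ CallRuntime(Runtime::kNewContext, 1);  // slow path (assumed fallback)
    }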

78
deps/v8/src/compiler.cc

@@ -56,6 +56,8 @@ class CodeGenSelector: public AstVisitor {
  private:
   // Visit an expression in a given expression context.
   void ProcessExpression(Expression* expr, Expression::Context context) {
+    ASSERT(expr->context() == Expression::kUninitialized ||
+           expr->context() == context);
     Expression::Context saved = context_;
     context_ = context;
     Visit(expr);
@@ -596,7 +598,7 @@ CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
     Slot* slot = scope->parameter(i)->slot();
     if (slot != NULL && slot->type() == Slot::CONTEXT) {
       if (FLAG_trace_bailout) {
-        PrintF("function has context-allocated parameters");
+        PrintF("Function has context-allocated parameters.\n");
       }
       return NORMAL;
     }
@@ -645,6 +647,18 @@ void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {

 void CodeGenSelector::VisitDeclaration(Declaration* decl) {
+  Property* prop = decl->proxy()->AsProperty();
+  if (prop != NULL) {
+    // Property rewrites are shared, ensure we are not changing its
+    // expression context state.
+    ASSERT(prop->obj()->context() == Expression::kUninitialized ||
+           prop->obj()->context() == Expression::kValue);
+    ASSERT(prop->key()->context() == Expression::kUninitialized ||
+           prop->key()->context() == Expression::kValue);
+    ProcessExpression(prop->obj(), Expression::kValue);
+    ProcessExpression(prop->key(), Expression::kValue);
+  }
+
   if (decl->fun() != NULL) {
     ProcessExpression(decl->fun(), Expression::kValue);
   }
@@ -676,12 +690,10 @@ void CodeGenSelector::VisitIfStatement(IfStatement* stmt) {

 void CodeGenSelector::VisitContinueStatement(ContinueStatement* stmt) {
-  BAILOUT("ContinueStatement");
 }


 void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {
-  BAILOUT("BreakStatement");
 }

@@ -691,12 +703,12 @@ void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {

 void CodeGenSelector::VisitWithEnterStatement(WithEnterStatement* stmt) {
-  BAILOUT("WithEnterStatement");
+  ProcessExpression(stmt->expression(), Expression::kValue);
 }


 void CodeGenSelector::VisitWithExitStatement(WithExitStatement* stmt) {
-  BAILOUT("WithExitStatement");
+  // Supported.
 }

@@ -724,21 +736,7 @@ void CodeGenSelector::VisitWhileStatement(WhileStatement* stmt) {

 void CodeGenSelector::VisitForStatement(ForStatement* stmt) {
-  // We do not handle loops with breaks or continue statements in their
-  // body.  We will bailout when we hit those statements in the body.
-  if (stmt->init() != NULL) {
-    Visit(stmt->init());
-    CHECK_BAILOUT;
-  }
-  if (stmt->cond() != NULL) {
-    ProcessExpression(stmt->cond(), Expression::kTest);
-    CHECK_BAILOUT;
-  }
-  Visit(stmt->body());
-  if (stmt->next() != NULL) {
-    CHECK_BAILOUT;
-    Visit(stmt->next());
-  }
+  BAILOUT("ForStatement");
 }

@@ -753,7 +751,9 @@ void CodeGenSelector::VisitTryCatchStatement(TryCatchStatement* stmt) {

 void CodeGenSelector::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  BAILOUT("TryFinallyStatement");
+  Visit(stmt->try_block());
+  CHECK_BAILOUT;
+  Visit(stmt->finally_block());
 }

@@ -885,34 +885,22 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
   // non-context (stack-allocated) locals, and global variables.
   Token::Value op = expr->op();
   if (op == Token::INIT_CONST) BAILOUT("initialize constant");
-  if (op != Token::ASSIGN && op != Token::INIT_VAR) {
-    BAILOUT("compound assignment");
-  }

   Variable* var = expr->target()->AsVariableProxy()->AsVariable();
   Property* prop = expr->target()->AsProperty();
+  ASSERT(var == NULL || prop == NULL);
   if (var != NULL) {
     // All global variables are supported.
     if (!var->is_global()) {
-      if (var->slot() == NULL) {
-        Property* property = var->AsProperty();
-        if (property == NULL) {
-          BAILOUT("non-global/non-slot/non-property assignment");
-        }
-        if (property->obj()->AsSlot() == NULL) {
-          BAILOUT("variable rewritten to property non slot object assignment");
-        }
-        if (property->key()->AsLiteral() == NULL) {
-          BAILOUT("variable rewritten to property non literal key assignment");
-        }
-      } else {
-        Slot::Type type = var->slot()->type();
-        if (type == Slot::LOOKUP) {
-          BAILOUT("Lookup slot");
-        }
+      ASSERT(var->slot() != NULL);
+      Slot::Type type = var->slot()->type();
+      if (type == Slot::LOOKUP) {
+        BAILOUT("Lookup slot");
       }
     }
   } else if (prop != NULL) {
+    ASSERT(prop->obj()->context() == Expression::kUninitialized ||
+           prop->obj()->context() == Expression::kValue);
     ProcessExpression(prop->obj(), Expression::kValue);
     CHECK_BAILOUT;
     // We will only visit the key during code generation for keyed property
@@ -923,6 +911,8 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
     if (lit == NULL ||
         !lit->handle()->IsSymbol() ||
         String::cast(*(lit->handle()))->AsArrayIndex(&ignored)) {
+      ASSERT(prop->key()->context() == Expression::kUninitialized ||
+             prop->key()->context() == Expression::kValue);
       ProcessExpression(prop->key(), Expression::kValue);
       CHECK_BAILOUT;
     }
@@ -1111,14 +1101,14 @@ void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {

 void CodeGenSelector::VisitCompareOperation(CompareOperation* expr) {
   ProcessExpression(expr->left(), Expression::kValue);
   CHECK_BAILOUT;
   ProcessExpression(expr->right(), Expression::kValue);
 }


 void CodeGenSelector::VisitThisFunction(ThisFunction* expr) {
-  BAILOUT("ThisFunction");
+  // ThisFunction is supported.
 }

 #undef BAILOUT
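
BAILOUT and CHECK_BAILOUT are file-local macros, defined earlier in compiler.cc and #undef'd here. Their definitions are not shown in this hunk; a rough sketch of their assumed shape, with the member flag name being an assumption (the FLAG_trace_bailout printing is visible in the hunks above):

    #define BAILOUT(reason)                               \
      do {                                                \
        if (FLAG_trace_bailout) PrintF("%s\n", reason);   \
        has_supported_syntax_ = false;  /* assumed member flag */ \
        return;                                           \
      } while (false)

    #define CHECK_BAILOUT \
      do { if (!has_supported_syntax_) return; } while (false)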

6
deps/v8/src/execution.cc

@@ -30,6 +30,7 @@
 #include "v8.h"

 #include "api.h"
+#include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "debug.h"
 #include "simulator.h"
@@ -607,6 +608,11 @@ Object* Execution::DebugBreakHelper() {
     return Heap::undefined_value();
   }

+  // Ignore debug break during bootstrapping.
+  if (Bootstrapper::IsActive()) {
+    return Heap::undefined_value();
+  }
+
   {
     JavaScriptFrameIterator it;
     ASSERT(!it.done());

15
deps/v8/src/factory.cc

@@ -284,7 +284,8 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {

 Handle<JSFunction> Factory::BaseNewFunctionFromBoilerplate(
     Handle<JSFunction> boilerplate,
-    Handle<Map> function_map) {
+    Handle<Map> function_map,
+    PretenureFlag pretenure) {
   ASSERT(boilerplate->IsBoilerplate());
   ASSERT(!boilerplate->has_initial_map());
   ASSERT(!boilerplate->has_prototype());
@@ -292,20 +293,22 @@ Handle<JSFunction> Factory::BaseNewFunctionFromBoilerplate(
   ASSERT(boilerplate->elements() == Heap::empty_fixed_array());
   CALL_HEAP_FUNCTION(Heap::AllocateFunction(*function_map,
                                             boilerplate->shared(),
-                                            Heap::the_hole_value()),
+                                            Heap::the_hole_value(),
+                                            pretenure),
                      JSFunction);
 }


 Handle<JSFunction> Factory::NewFunctionFromBoilerplate(
     Handle<JSFunction> boilerplate,
-    Handle<Context> context) {
-  Handle<JSFunction> result =
-      BaseNewFunctionFromBoilerplate(boilerplate, Top::function_map());
+    Handle<Context> context,
+    PretenureFlag pretenure) {
+  Handle<JSFunction> result = BaseNewFunctionFromBoilerplate(
+      boilerplate, Top::function_map(), pretenure);
   result->set_context(*context);
   int number_of_literals = boilerplate->NumberOfLiterals();
   Handle<FixedArray> literals =
-      Factory::NewFixedArray(number_of_literals, TENURED);
+      Factory::NewFixedArray(number_of_literals, pretenure);
   if (number_of_literals > 0) {
     // Store the object, regexp and array functions in the literals
     // array prefix.  These functions will be used when creating
6
deps/v8/src/factory.h

@ -219,7 +219,8 @@ class Factory : public AllStatic {
static Handle<JSFunction> NewFunctionFromBoilerplate( static Handle<JSFunction> NewFunctionFromBoilerplate(
Handle<JSFunction> boilerplate, Handle<JSFunction> boilerplate,
Handle<Context> context); Handle<Context> context,
PretenureFlag pretenure = TENURED);
static Handle<Code> NewCode(const CodeDesc& desc, static Handle<Code> NewCode(const CodeDesc& desc,
ZoneScopeInfo* sinfo, ZoneScopeInfo* sinfo,
@ -374,7 +375,8 @@ class Factory : public AllStatic {
static Handle<JSFunction> BaseNewFunctionFromBoilerplate( static Handle<JSFunction> BaseNewFunctionFromBoilerplate(
Handle<JSFunction> boilerplate, Handle<JSFunction> boilerplate,
Handle<Map> function_map); Handle<Map> function_map,
PretenureFlag pretenure);
// Create a new map cache. // Create a new map cache.
static Handle<MapCache> NewMapCache(int at_least_space_for); static Handle<MapCache> NewMapCache(int at_least_space_for);
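
The TENURED default keeps every existing call site allocating in old space, while new callers can opt into new-space allocation for closures expected to die young. A sketch of the two call shapes (boilerplate and context handles assumed in scope):

    // Unchanged call sites: function and literals array stay tenured.
    Handle<JSFunction> fn =
        Factory::NewFunctionFromBoilerplate(boilerplate, context);

    // New-space allocation for a presumably short-lived closure.
    Handle<JSFunction> tmp =
        Factory::NewFunctionFromBoilerplate(boilerplate, context, NOT_TENURED);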

308
deps/v8/src/fast-codegen.cc

@ -36,7 +36,7 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
#define __ ACCESS_MASM(masm_) #define __ ACCESS_MASM(masm())
Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun, Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
Handle<Script> script, Handle<Script> script,
@@ -232,8 +232,10 @@ void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
 void FastCodeGenerator::VisitBlock(Block* stmt) {
   Comment cmnt(masm_, "[ Block");
+  Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
   VisitStatements(stmt->statements());
+  __ bind(nested_statement.break_target());
 }
@@ -278,22 +280,88 @@ void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
 void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
-  UNREACHABLE();
+  Comment cmnt(masm_, "[ ContinueStatement");
+  NestedStatement* current = nesting_stack_;
+  int stack_depth = 0;
+  while (!current->IsContinueTarget(stmt->target())) {
+    stack_depth = current->Exit(stack_depth);
+    current = current->outer();
+  }
+  __ Drop(stack_depth);
+
+  Iteration* loop = current->AsIteration();
+  __ jmp(loop->continue_target());
 }

 void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
-  UNREACHABLE();
+  Comment cmnt(masm_, "[ BreakStatement");
+  NestedStatement* current = nesting_stack_;
+  int stack_depth = 0;
+  while (!current->IsBreakTarget(stmt->target())) {
+    stack_depth = current->Exit(stack_depth);
+    current = current->outer();
+  }
+  __ Drop(stack_depth);
+
+  Breakable* target = current->AsBreakable();
+  __ jmp(target->break_target());
+}
+
+
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+  Comment cmnt(masm_, "[ ReturnStatement");
+  Expression* expr = stmt->expression();
+  // Complete the statement based on the type of the subexpression.
+  if (expr->AsLiteral() != NULL) {
+    __ Move(result_register(), expr->AsLiteral()->handle());
+  } else {
+    ASSERT_EQ(Expression::kValue, expr->context());
+    Visit(expr);
+    __ pop(result_register());
+  }
+
+  // Exit all nested statements.
+  NestedStatement* current = nesting_stack_;
+  int stack_depth = 0;
+  while (current != NULL) {
+    stack_depth = current->Exit(stack_depth);
+    current = current->outer();
+  }
+  __ Drop(stack_depth);
+
+  EmitReturnSequence(stmt->statement_pos());
 }

 void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
-  UNREACHABLE();
+  Comment cmnt(masm_, "[ WithEnterStatement");
+  SetStatementPosition(stmt);
+
+  Visit(stmt->expression());
+  if (stmt->is_catch_block()) {
+    __ CallRuntime(Runtime::kPushCatchContext, 1);
+  } else {
+    __ CallRuntime(Runtime::kPushContext, 1);
+  }
+  // Both runtime calls return the new context in both the context and the
+  // result registers.
+
+  // Update local stack frame context field.
+  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
 }

 void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
-  UNREACHABLE();
+  Comment cmnt(masm_, "[ WithExitStatement");
+  SetStatementPosition(stmt);
+
+  // Pop context.
+  LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+  // Update local stack frame context field.
+  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
 }
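The break/continue implementations above walk the nesting stack outward, letting each enclosing construct report the stack slots it leaves behind before a single Drop and jump. A standalone C++ sketch of that accumulation (illustrative names, not V8 code):

    #include <cstdio>

    struct Nested {
      Nested* outer;
      int slots;        // stack slots this construct occupies
      bool is_target;   // e.g. the loop a 'break' refers to
      int Exit(int depth) const { return depth + slots; }
    };

    int main() {
      Nested loop  = { nullptr, 0, true  };  // the break target itself
      Nested forin = { &loop,   5, false };  // a for-in adds 5 slots
      Nested block = { &forin,  0, false };
      int stack_depth = 0;
      for (Nested* cur = &block; !cur->is_target; cur = cur->outer)
        stack_depth = cur->Exit(stack_depth);
      std::printf("Drop(%d) before jumping to the break target\n", stack_depth);
    }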
@@ -304,8 +372,10 @@ void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
 void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   Comment cmnt(masm_, "[ DoWhileStatement");
+  Label body, stack_limit_hit, stack_check_success;
+
+  Iteration loop_statement(this, stmt);
   increment_loop_depth();
-  Label body, exit, stack_limit_hit, stack_check_success;

   __ bind(&body);
   Visit(stmt->body());
@@ -316,10 +386,11 @@ void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   // We are not in an expression context because we have been compiling
   // statements.  Set up a test expression context for the condition.
+  __ bind(loop_statement.continue_target());
   ASSERT_EQ(NULL, true_label_);
   ASSERT_EQ(NULL, false_label_);
   true_label_ = &body;
-  false_label_ = &exit;
+  false_label_ = loop_statement.break_target();
   ASSERT(stmt->cond()->context() == Expression::kTest);
   Visit(stmt->cond());
   true_label_ = NULL;
@@ -330,7 +401,7 @@ void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   __ CallStub(&stack_stub);
   __ jmp(&stack_check_success);

-  __ bind(&exit);
+  __ bind(loop_statement.break_target());

   decrement_loop_depth();
 }
@@ -338,16 +409,18 @@ void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
 void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
   Comment cmnt(masm_, "[ WhileStatement");
+  Label body, stack_limit_hit, stack_check_success;
+
+  Iteration loop_statement(this, stmt);
   increment_loop_depth();
-  Label test, body, exit, stack_limit_hit, stack_check_success;

   // Emit the test at the bottom of the loop.
-  __ jmp(&test);
+  __ jmp(loop_statement.continue_target());

   __ bind(&body);
   Visit(stmt->body());

-  __ bind(&test);
+  __ bind(loop_statement.continue_target());
   // Check stack before looping.
   __ StackLimitCheck(&stack_limit_hit);
   __ bind(&stack_check_success);
@@ -357,7 +430,7 @@ void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
   ASSERT_EQ(NULL, true_label_);
   ASSERT_EQ(NULL, false_label_);
   true_label_ = &body;
-  false_label_ = &exit;
+  false_label_ = loop_statement.break_target();
   ASSERT(stmt->cond()->context() == Expression::kTest);
   Visit(stmt->cond());
   true_label_ = NULL;
@@ -368,55 +441,13 @@ void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
   __ CallStub(&stack_stub);
   __ jmp(&stack_check_success);

-  __ bind(&exit);
+  __ bind(loop_statement.break_target());

   decrement_loop_depth();
 }
 void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
-  Comment cmnt(masm_, "[ ForStatement");
-  Label test, body, exit, stack_limit_hit, stack_check_success;
-
-  if (stmt->init() != NULL) Visit(stmt->init());
-
-  increment_loop_depth();
-
-  // Emit the test at the bottom of the loop (even if empty).
-  __ jmp(&test);
-  __ bind(&body);
-  Visit(stmt->body());
-
-  // Check stack before looping.
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_success);
-
-  if (stmt->next() != NULL) Visit(stmt->next());
-
-  __ bind(&test);
-  if (stmt->cond() == NULL) {
-    // For an empty test jump to the top of the loop.
-    __ jmp(&body);
-  } else {
-    // We are not in an expression context because we have been compiling
-    // statements.  Set up a test expression context for the condition.
-    ASSERT_EQ(NULL, true_label_);
-    ASSERT_EQ(NULL, false_label_);
-
-    true_label_ = &body;
-    false_label_ = &exit;
-    ASSERT(stmt->cond()->context() == Expression::kTest);
-    Visit(stmt->cond());
-    true_label_ = NULL;
-    false_label_ = NULL;
-  }
-
-  __ bind(&stack_limit_hit);
-  StackCheckStub stack_stub;
-  __ CallStub(&stack_stub);
-  __ jmp(&stack_check_success);
-
-  __ bind(&exit);
-
-  decrement_loop_depth();
+  UNREACHABLE();
 }
@@ -431,7 +462,63 @@ void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
 void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  UNREACHABLE();
+  // Try finally is compiled by setting up a try-handler on the stack while
+  // executing the try body, and removing it again afterwards.
+  //
+  // The try-finally construct can enter the finally block in three ways:
+  // 1. By exiting the try-block normally.  This removes the try-handler and
+  //    calls the finally block code before continuing.
+  // 2. By exiting the try-block with a function-local control flow transfer
+  //    (break/continue/return).  The site of the break (for example) removes
+  //    the try handler and calls the finally block code before continuing
+  //    its outward control transfer.
+  // 3. By exiting the try-block with a thrown exception.
+  //    This can happen in nested function calls.  It traverses the
+  //    try-handler chain and consumes the try-handler entry before jumping
+  //    to the handler code.  The handler code then calls the finally block
+  //    before rethrowing the exception.
+  //
+  // The finally block must assume a return address on top of the stack
+  // (or in the link register on ARM chips) and a value (return value or
+  // exception) in the result register (rax/eax/r0), both of which must
+  // be preserved.  The return address isn't GC-safe, so it should be
+  // cooked before GC.
+  Label finally_entry;
+  Label try_handler_setup;
+
+  // Set up the try-handler chain: jump to the try-handler setup and
+  // try-block code, using a call to put the try-handler address on the
+  // stack.
+  __ Call(&try_handler_setup);
+
+  // Try handler code.  Return address of call is pushed on handler stack.
+  {
+    // This code is only executed during stack-handler traversal when an
+    // exception is thrown.  The exception is in the result register, which
+    // is retained by the finally block.
+    // Call the finally block and then rethrow the exception.
+    __ Call(&finally_entry);
+    ThrowException();
+  }
+
+  __ bind(&finally_entry);
+  {
+    // Finally block implementation.
+    EnterFinallyBlock();
+    Finally finally_block(this);
+    Visit(stmt->finally_block());
+    ExitFinallyBlock();  // Return to the calling code.
+  }
+
+  __ bind(&try_handler_setup);
+  {
+    // Set up the try handler (stack pointer registers).
+    __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
+    TryFinally try_block(this, &finally_entry);
+    VisitStatements(stmt->try_block()->statements());
+    __ PopTryHandler();
+  }
+  // Execute the finally block on the way out.
+  __ Call(&finally_entry);
 }
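A standalone C++ analog (not the generated code itself) of the shape described in the comment above: the finally block behaves like a subroutine invoked from each of the three exit paths, normal completion, a local control transfer, and an exceptional exit that rethrows afterwards.

    #include <cstdio>

    static void FinallyBlock() { std::printf("finally runs\n"); }

    static void TryBody(int mode) {
      if (mode == 1) { FinallyBlock(); return; }    // local control transfer
      if (mode == 2) { FinallyBlock(); throw 42; }  // exceptional exit, rethrow
      FinallyBlock();                               // normal completion
    }

    int main() {
      for (int mode = 0; mode <= 2; ++mode) {
        try { TryBody(mode); } catch (int) { std::printf("caught rethrow\n"); }
      }
    }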
@@ -500,40 +587,79 @@ void FastCodeGenerator::VisitLiteral(Literal* expr) {
 void FastCodeGenerator::VisitAssignment(Assignment* expr) {
   Comment cmnt(masm_, "[ Assignment");
+  ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
+
   // Record source code position of the (possible) IC call.
   SetSourcePosition(expr->position());

-  Expression* rhs = expr->value();
-  // Left-hand side can only be a property, a global or a (parameter or
-  // local) slot.
-  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot.  Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
   Property* prop = expr->target()->AsProperty();
-  if (var != NULL) {
-    Visit(rhs);
-    ASSERT_EQ(Expression::kValue, rhs->context());
-    EmitVariableAssignment(expr);
-  } else if (prop != NULL) {
-    // Assignment to a property.
-    Visit(prop->obj());
-    ASSERT_EQ(Expression::kValue, prop->obj()->context());
-    // Use the expression context of the key subexpression to detect whether
-    // we have decided to use a named or keyed IC.
-    if (prop->key()->context() == Expression::kUninitialized) {
-      ASSERT(prop->key()->AsLiteral() != NULL);
-      Visit(rhs);
-      ASSERT_EQ(Expression::kValue, rhs->context());
-      EmitNamedPropertyAssignment(expr);
-    } else {
+  // In case of a property we use the uninitialized expression context
+  // of the key to detect a named property.
+  if (prop != NULL) {
+    assign_type = (prop->key()->context() == Expression::kUninitialized)
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
+
+  // Evaluate the LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      Visit(prop->obj());
+      ASSERT_EQ(Expression::kValue, prop->obj()->context());
+      break;
+    case KEYED_PROPERTY:
+      Visit(prop->obj());
+      ASSERT_EQ(Expression::kValue, prop->obj()->context());
       Visit(prop->key());
       ASSERT_EQ(Expression::kValue, prop->key()->context());
-      Visit(rhs);
-      ASSERT_EQ(Expression::kValue, rhs->context());
-      EmitKeyedPropertyAssignment(expr);
+      break;
+  }
+
+  // If we have a compound assignment: get the value of the LHS expression
+  // and store it on top of the stack.
+  // Note: Relies on kValue context being 'stack'.
+  if (expr->is_compound()) {
+    switch (assign_type) {
+      case VARIABLE:
+        EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
+                         Expression::kValue);
+        break;
+      case NAMED_PROPERTY:
+        EmitNamedPropertyLoad(prop, Expression::kValue);
+        break;
+      case KEYED_PROPERTY:
+        EmitKeyedPropertyLoad(Expression::kValue);
+        break;
     }
-  } else {
-    UNREACHABLE();
+  }
+
+  // Evaluate the RHS expression.
+  Expression* rhs = expr->value();
+  ASSERT_EQ(Expression::kValue, rhs->context());
+  Visit(rhs);
+
+  // If we have a compound assignment: apply the operator.
+  if (expr->is_compound()) {
+    EmitCompoundAssignmentOp(expr->binary_op(), Expression::kValue);
+  }
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE:
+      EmitVariableAssignment(expr);
+      break;
+    case NAMED_PROPERTY:
+      EmitNamedPropertyAssignment(expr);
+      break;
+    case KEYED_PROPERTY:
+      EmitKeyedPropertyAssignment(expr);
+      break;
   }
 }
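A standalone sketch (not V8 code) of the evaluation order the rewritten VisitAssignment implements for a compound keyed assignment such as o[k()] += v(): the target subexpressions are evaluated once, the old value is loaded, the RHS is evaluated, the operator is applied, and the result is stored back.

    #include <cstdio>
    #include <map>
    #include <string>

    int main() {
      std::map<std::string, int> o = { {"a", 10} };
      auto key = [] { std::printf("evaluate key\n"); return std::string("a"); };
      auto rhs = [] { std::printf("evaluate RHS\n"); return 5; };

      std::string k = key();           // evaluate LHS subexpressions once
      int old_value = o[k];            // compound: load the current value
      int result = old_value + rhs();  // evaluate RHS, apply the operator
      o[k] = result;                   // store the value
      std::printf("o[\"a\"] == %d\n", o["a"]);
    }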
@@ -548,8 +674,20 @@ void FastCodeGenerator::VisitThrow(Throw* expr) {
 }

-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
-  UNREACHABLE();
+int FastCodeGenerator::TryFinally::Exit(int stack_depth) {
+  // The macros used here must preserve the result register.
+  __ Drop(stack_depth);
+  __ PopTryHandler();
+  __ Call(finally_entry_);
+  return 0;
+}
+
+
+int FastCodeGenerator::TryCatch::Exit(int stack_depth) {
+  // The macros used here must preserve the result register.
+  __ Drop(stack_depth);
+  __ PopTryHandler();
+  return 0;
 }

195
deps/v8/src/fast-codegen.h

@@ -35,6 +35,8 @@
 namespace v8 {
 namespace internal {

+// -----------------------------------------------------------------------------
+// Fast code generator.
+
 class FastCodeGenerator: public AstVisitor {
  public:
@@ -43,6 +45,7 @@ class FastCodeGenerator: public AstVisitor {
         function_(NULL),
         script_(script),
         is_eval_(is_eval),
+        nesting_stack_(NULL),
         loop_depth_(0),
         true_label_(NULL),
         false_label_(NULL) {
@@ -55,6 +58,159 @@ class FastCodeGenerator: public AstVisitor {
   void Generate(FunctionLiteral* fun);

  private:
+  class Breakable;
+  class Iteration;
+  class TryCatch;
+  class TryFinally;
+  class Finally;
+  class ForIn;
+
+  class NestedStatement BASE_EMBEDDED {
+   public:
+    explicit NestedStatement(FastCodeGenerator* codegen) : codegen_(codegen) {
+      // Link into codegen's nesting stack.
+      previous_ = codegen->nesting_stack_;
+      codegen->nesting_stack_ = this;
+    }
+    virtual ~NestedStatement() {
+      // Unlink from codegen's nesting stack.
+      ASSERT_EQ(this, codegen_->nesting_stack_);
+      codegen_->nesting_stack_ = previous_;
+    }
+
+    virtual Breakable* AsBreakable() { return NULL; }
+    virtual Iteration* AsIteration() { return NULL; }
+    virtual TryCatch* AsTryCatch() { return NULL; }
+    virtual TryFinally* AsTryFinally() { return NULL; }
+    virtual Finally* AsFinally() { return NULL; }
+    virtual ForIn* AsForIn() { return NULL; }
+
+    virtual bool IsContinueTarget(Statement* target) { return false; }
+    virtual bool IsBreakTarget(Statement* target) { return false; }
+
+    // Generate code to leave the nested statement.  This includes
+    // cleaning up any stack elements in use and restoring the
+    // stack to the expectations of the surrounding statements.
+    // Takes the number of stack elements currently on top of the
+    // nested statement's stack, and returns the number of stack
+    // elements left on top of the surrounding statement's stack.
+    // The generated code must preserve the result register (which
+    // contains the value in case of a return).
+    virtual int Exit(int stack_depth) {
+      // Default implementation for the case where there is
+      // nothing to clean up.
+      return stack_depth;
+    }
+
+    NestedStatement* outer() { return previous_; }
+
+   protected:
+    MacroAssembler* masm() { return codegen_->masm(); }
+
+   private:
+    FastCodeGenerator* codegen_;
+    NestedStatement* previous_;
+    DISALLOW_COPY_AND_ASSIGN(NestedStatement);
+  };
+
+  class Breakable : public NestedStatement {
+   public:
+    Breakable(FastCodeGenerator* codegen,
+              BreakableStatement* break_target)
+        : NestedStatement(codegen),
+          target_(break_target) {}
+    virtual ~Breakable() {}
+    virtual Breakable* AsBreakable() { return this; }
+    virtual bool IsBreakTarget(Statement* statement) {
+      return target_ == statement;
+    }
+    BreakableStatement* statement() { return target_; }
+    Label* break_target() { return &break_target_label_; }
+   private:
+    BreakableStatement* target_;
+    Label break_target_label_;
+    DISALLOW_COPY_AND_ASSIGN(Breakable);
+  };
+
+  class Iteration : public Breakable {
+   public:
+    Iteration(FastCodeGenerator* codegen,
+              IterationStatement* iteration_statement)
+        : Breakable(codegen, iteration_statement) {}
+    virtual ~Iteration() {}
+    virtual Iteration* AsIteration() { return this; }
+    virtual bool IsContinueTarget(Statement* statement) {
+      return this->statement() == statement;
+    }
+    Label* continue_target() { return &continue_target_label_; }
+   private:
+    Label continue_target_label_;
+    DISALLOW_COPY_AND_ASSIGN(Iteration);
+  };
+
+  // The environment inside the try block of a try/catch statement.
+  class TryCatch : public NestedStatement {
+   public:
+    explicit TryCatch(FastCodeGenerator* codegen, Label* catch_entry)
+        : NestedStatement(codegen), catch_entry_(catch_entry) { }
+    virtual ~TryCatch() {}
+    virtual TryCatch* AsTryCatch() { return this; }
+    Label* catch_entry() { return catch_entry_; }
+    virtual int Exit(int stack_depth);
+   private:
+    Label* catch_entry_;
+    DISALLOW_COPY_AND_ASSIGN(TryCatch);
+  };
+
+  // The environment inside the try block of a try/finally statement.
+  class TryFinally : public NestedStatement {
+   public:
+    explicit TryFinally(FastCodeGenerator* codegen, Label* finally_entry)
+        : NestedStatement(codegen), finally_entry_(finally_entry) { }
+    virtual ~TryFinally() {}
+    virtual TryFinally* AsTryFinally() { return this; }
+    Label* finally_entry() { return finally_entry_; }
+    virtual int Exit(int stack_depth);
+   private:
+    Label* finally_entry_;
+    DISALLOW_COPY_AND_ASSIGN(TryFinally);
+  };
+
+  // The environment of a finally block.  Abnormal termination of the
+  // finally block needs to clean up the block's parameters from the
+  // stack.
+  class Finally : public NestedStatement {
+   public:
+    explicit Finally(FastCodeGenerator* codegen) : NestedStatement(codegen) { }
+    virtual ~Finally() {}
+    virtual Finally* AsFinally() { return this; }
+    virtual int Exit(int stack_depth) {
+      return stack_depth + kFinallyStackElementCount;
+    }
+   private:
+    // Number of extra stack slots occupied during a finally block.
+    static const int kFinallyStackElementCount = 2;
+    DISALLOW_COPY_AND_ASSIGN(Finally);
+  };
+
+  // The environment of a for-in loop.  Abnormal termination of the
+  // for-in block needs to clean up the block's temporary storage from
+  // the stack.
+  class ForIn : public Iteration {
+   public:
+    ForIn(FastCodeGenerator* codegen,
+          ForInStatement* statement)
+        : Iteration(codegen, statement) { }
+    virtual ~ForIn() {}
+    virtual ForIn* AsForIn() { return this; }
+    virtual int Exit(int stack_depth) {
+      return stack_depth + kForInStackElementCount;
+    }
+   private:
+    // TODO(lrn): Check that this value is correct when implementing
+    // for-in.
+    static const int kForInStackElementCount = 5;
+    DISALLOW_COPY_AND_ASSIGN(ForIn);
+  };
+
   int SlotOffset(Slot* slot);
   void Move(Expression::Context destination, Register source);
   void Move(Expression::Context destination, Slot* source, Register scratch);
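A standalone sketch (not V8 code) of the RAII linking NestedStatement relies on: the constructor pushes the object onto the code generator's nesting stack and the destructor pops it, so the runtime stack model always mirrors the C++ scopes of the visitor while it walks the AST.

    #include <cassert>
    #include <cstdio>

    struct Codegen;  // stands in for FastCodeGenerator

    struct Nested {
      explicit Nested(Codegen* cg);
      ~Nested();
      Codegen* cg_;
      Nested* previous_;
    };

    struct Codegen { Nested* nesting_stack = nullptr; };

    Nested::Nested(Codegen* cg) : cg_(cg), previous_(cg->nesting_stack) {
      cg->nesting_stack = this;  // link in on construction
    }
    Nested::~Nested() {
      assert(cg_->nesting_stack == this);
      cg_->nesting_stack = previous_;  // unlink on destruction
    }

    int main() {
      Codegen cg;
      {
        Nested outer(&cg);
        { Nested inner(&cg); assert(cg.nesting_stack == &inner); }
        assert(cg.nesting_stack == &outer);
      }
      assert(cg.nesting_stack == nullptr);
      std::printf("nesting stack mirrors scopes\n");
    }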
@@ -84,10 +240,25 @@ class FastCodeGenerator: public AstVisitor {
   // Platform-specific code sequences for calls
   void EmitCallWithStub(Call* expr);
-  void EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info);
+  void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
+
+  // Platform-specific code for loading variables.
+  void EmitVariableLoad(Variable* expr, Expression::Context context);

   // Platform-specific support for compiling assignments.

+  // Load a value from a named property and push the result on the stack.
+  // The receiver is left on the stack by the IC.
+  void EmitNamedPropertyLoad(Property* expr, Expression::Context context);
+
+  // Load a value from a keyed property and push the result on the stack.
+  // The receiver and the key are left on the stack by the IC.
+  void EmitKeyedPropertyLoad(Expression::Context context);
+
+  // Apply the compound assignment operator.  Expects both operands on top
+  // of the stack.
+  void EmitCompoundAssignmentOp(Token::Value op, Expression::Context context);
+
   // Complete a variable assignment.  The right-hand-side value is expected
   // on top of the stack.
   void EmitVariableAssignment(Assignment* expr);
@@ -105,6 +276,12 @@ class FastCodeGenerator: public AstVisitor {
   void SetStatementPosition(Statement* stmt);
   void SetSourcePosition(int pos);

+  // Non-local control flow support.
+  void EnterFinallyBlock();
+  void ExitFinallyBlock();
+  void ThrowException();
+
   // Loop nesting counter.
   int loop_depth() { return loop_depth_; }
   void increment_loop_depth() { loop_depth_++; }
   void decrement_loop_depth() {
@@ -112,11 +289,22 @@ class FastCodeGenerator: public AstVisitor {
     loop_depth_--;
   }

+  MacroAssembler* masm() { return masm_; }
+  static Register result_register();
+  static Register context_register();
+
+  // Set fields in the stack frame.  Offsets are the frame pointer relative
+  // offsets defined in, e.g., StandardFrameConstants.
+  void StoreToFrameField(int frame_offset, Register value);
+
+  // Load a value from the current context.  Indices are defined as an enum
+  // in v8::internal::Context.
+  void LoadContextField(Register dst, int context_index);
+
   // AST node visit functions.
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
+
   // Handles the shortcutted logical binary operations in VisitBinaryOperation.
   void EmitLogicalOperation(BinaryOperation* expr);
@@ -125,11 +313,14 @@ class FastCodeGenerator: public AstVisitor {
   Handle<Script> script_;
   bool is_eval_;

   Label return_label_;
+  NestedStatement* nesting_stack_;
   int loop_depth_;

   Label* true_label_;
   Label* false_label_;

+  friend class NestedStatement;
+
   DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
 };

17
deps/v8/src/global-handles.cc

@@ -168,6 +168,12 @@ class GlobalHandles::Node : public Malloced {
       if (first_deallocated()) {
         first_deallocated()->set_next(head());
       }
+      // Check that we are not passing a finalized external string to
+      // the callback.
+      ASSERT(!object_->IsExternalAsciiString() ||
+             ExternalAsciiString::cast(object_)->resource() != NULL);
+      ASSERT(!object_->IsExternalTwoByteString() ||
+             ExternalTwoByteString::cast(object_)->resource() != NULL);
       // Leaving V8.
       VMState state(EXTERNAL);
       func(object, par);
@@ -436,15 +442,15 @@ void GlobalHandles::RecordStats(HeapStats* stats) {
   *stats->near_death_global_handle_count = 0;
   *stats->destroyed_global_handle_count = 0;
   for (Node* current = head_; current != NULL; current = current->next()) {
-    *stats->global_handle_count++;
+    *stats->global_handle_count += 1;
     if (current->state_ == Node::WEAK) {
-      *stats->weak_global_handle_count++;
+      *stats->weak_global_handle_count += 1;
     } else if (current->state_ == Node::PENDING) {
-      *stats->pending_global_handle_count++;
+      *stats->pending_global_handle_count += 1;
    } else if (current->state_ == Node::NEAR_DEATH) {
-      *stats->near_death_global_handle_count++;
+      *stats->near_death_global_handle_count += 1;
    } else if (current->state_ == Node::DESTROYED) {
-      *stats->destroyed_global_handle_count++;
+      *stats->destroyed_global_handle_count += 1;
    }
   }
 }
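This hunk fixes a classic precedence bug: postfix ++ binds tighter than unary *, so `*stats->global_handle_count++` advanced the pointer and left the counter untouched. A standalone demo:

    #include <cstdio>

    int main() {
      int counter = 0;
      int* p = &counter;
      *p++;        // increments the pointer p, not counter
      std::printf("after *p++:    counter = %d\n", counter);  // still 0
      p = &counter;
      *p += 1;     // the fix: increments the pointed-to value
      std::printf("after *p += 1: counter = %d\n", counter);  // now 1
    }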
@@ -507,5 +513,4 @@ void GlobalHandles::RemoveObjectGroups() {
   object_groups->Clear();
 }

-
 } }  // namespace v8::internal

19
deps/v8/src/globals.h

@@ -145,6 +145,14 @@ const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
 const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
 const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;

+// Desired alignment for maps.
+#if V8_HOST_ARCH_64_BIT
+const intptr_t kMapAlignmentBits = kObjectAlignmentBits;
+#else
+const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
+#endif
+const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
+const intptr_t kMapAlignmentMask = kMapAlignment - 1;

 // Tag information for Failure.
 const int kFailureTag = 3;
@@ -174,6 +182,11 @@ const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
 #endif

+// Number of bits to represent the page size for paged spaces.  The value of 13
+// gives 8K bytes per page.
+const int kPageSizeBits = 13;
+
 // Constants relevant to double precision floating point numbers.
 // Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
@@ -294,7 +307,7 @@ enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
 enum Executability { NOT_EXECUTABLE, EXECUTABLE };

-enum VisitMode { VISIT_ALL, VISIT_ONLY_STRONG };
+enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };

 // A CodeDesc describes a buffer holding instructions and relocation
@@ -450,6 +463,10 @@ enum StateTag {
 #define POINTER_SIZE_ALIGN(value) \
   (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)

+// MAP_SIZE_ALIGN returns the value aligned as a map pointer.
+#define MAP_SIZE_ALIGN(value) \
+  (((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
+
 // The expression OFFSET_OF(type, field) computes the byte-offset
 // of the specified field relative to the containing type. This
 // corresponds to 'offsetof' (in stddef.h), except that it doesn't
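A standalone demo of the mask-based alignment idiom behind MAP_SIZE_ALIGN (and POINTER_SIZE_ALIGN): adding the mask and clearing the low bits rounds a value up to the next multiple of a power-of-two alignment. Constants here are illustrative.

    #include <cstdio>

    const long kAlignmentBits = 6;                 // e.g. 2^6 = 64-byte units
    const long kAlignment = 1L << kAlignmentBits;
    const long kAlignmentMask = kAlignment - 1;

    #define ALIGN_UP(value) (((value) + kAlignmentMask) & ~kAlignmentMask)

    int main() {
      for (long v : {1L, 63L, 64L, 65L, 200L})
        std::printf("align(%3ld) = %3ld\n", v, (long)ALIGN_UP(v));
    }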

69
deps/v8/src/heap-inl.h

@@ -109,6 +109,19 @@ Object* Heap::NumberFromUint32(uint32_t value) {
 }

+void Heap::FinalizeExternalString(String* string) {
+  ASSERT(string->IsExternalString());
+  v8::String::ExternalStringResourceBase** resource_addr =
+      reinterpret_cast<v8::String::ExternalStringResourceBase**>(
+          reinterpret_cast<byte*>(string) +
+          ExternalString::kResourceOffset -
+          kHeapObjectTag);
+  delete *resource_addr;
+  // Clear the resource pointer in the string.
+  *resource_addr = NULL;
+}
+
 Object* Heap::AllocateRawMap() {
 #ifdef DEBUG
   Counters::objs_since_last_full.Increment();
@@ -116,6 +129,12 @@ Object* Heap::AllocateRawMap() {
 #endif
   Object* result = map_space_->AllocateRaw(Map::kSize);
   if (result->IsFailure()) old_gen_exhausted_ = true;
+#ifdef DEBUG
+  if (!result->IsFailure()) {
+    // Maps have their own alignment.
+    CHECK((OffsetFrom(result) & kMapAlignmentMask) == kHeapObjectTag);
+  }
+#endif
   return result;
 }
@@ -321,6 +340,56 @@ inline bool Heap::allow_allocation(bool new_state) {
 #endif

+void ExternalStringTable::AddString(String* string) {
+  ASSERT(string->IsExternalString());
+  if (Heap::InNewSpace(string)) {
+    new_space_strings_.Add(string);
+  } else {
+    old_space_strings_.Add(string);
+  }
+}
+
+
+void ExternalStringTable::Iterate(ObjectVisitor* v) {
+  if (!new_space_strings_.is_empty()) {
+    Object** start = &new_space_strings_[0];
+    v->VisitPointers(start, start + new_space_strings_.length());
+  }
+  if (!old_space_strings_.is_empty()) {
+    Object** start = &old_space_strings_[0];
+    v->VisitPointers(start, start + old_space_strings_.length());
+  }
+}
+
+
+// Verify() is inline to avoid ifdef-s around its calls in release
+// mode.
+void ExternalStringTable::Verify() {
+#ifdef DEBUG
+  for (int i = 0; i < new_space_strings_.length(); ++i) {
+    ASSERT(Heap::InNewSpace(new_space_strings_[i]));
+    ASSERT(new_space_strings_[i] != Heap::raw_unchecked_null_value());
+  }
+  for (int i = 0; i < old_space_strings_.length(); ++i) {
+    ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
+    ASSERT(old_space_strings_[i] != Heap::raw_unchecked_null_value());
+  }
+#endif
+}
+
+
+void ExternalStringTable::AddOldString(String* string) {
+  ASSERT(string->IsExternalString());
+  ASSERT(!Heap::InNewSpace(string));
+  old_space_strings_.Add(string);
+}
+
+
+void ExternalStringTable::ShrinkNewStrings(int position) {
+  new_space_strings_.Rewind(position);
+  Verify();
+}
+
 } }  // namespace v8::internal

 #endif  // V8_HEAP_INL_H_

5
deps/v8/src/heap-profiler.cc

@@ -667,8 +667,9 @@ void ProducerHeapProfile::Setup() {
   can_log_ = true;
 }

-void ProducerHeapProfile::RecordJSObjectAllocation(Object* obj) {
-  if (!can_log_ || !FLAG_log_producers) return;
+void ProducerHeapProfile::DoRecordJSObjectAllocation(Object* obj) {
+  ASSERT(FLAG_log_producers);
+  if (!can_log_) return;
   int framesCount = 0;
   for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
     ++framesCount;

6
deps/v8/src/heap-profiler.h

@@ -261,8 +261,12 @@ class RetainerHeapProfile BASE_EMBEDDED {
 class ProducerHeapProfile : public AllStatic {
  public:
   static void Setup();
-  static void RecordJSObjectAllocation(Object* obj);
+  static void RecordJSObjectAllocation(Object* obj) {
+    if (FLAG_log_producers) DoRecordJSObjectAllocation(obj);
+  }
  private:
+  static void DoRecordJSObjectAllocation(Object* obj);
   static bool can_log_;
 };
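A standalone sketch (not V8 code) of the pattern introduced here: keep a cheap, inlinable flag check in the header so the common flag-off case costs one branch and no function call, while the heavyweight work stays out of line.

    #include <cstdio>

    bool FLAG_log_producers = false;  // illustrative runtime flag

    void DoRecord(int obj);  // out-of-line slow path

    inline void Record(int obj) {
      if (FLAG_log_producers) DoRecord(obj);  // usually just a test and skip
    }

    void DoRecord(int obj) { std::printf("recording %d\n", obj); }

    int main() {
      Record(1);                  // flag off: slow path never called
      FLAG_log_producers = true;
      Record(2);                  // flag on: slow path runs
    }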

159
deps/v8/src/heap.cc

@@ -733,7 +733,7 @@ void Heap::Scavenge() {
   ScavengeVisitor scavenge_visitor;
   // Copy roots.
-  IterateRoots(&scavenge_visitor, VISIT_ALL);
+  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

   // Copy objects reachable from the old generation.  By definition,
   // there are no intergenerational pointers in code or data spaces.
@@ -753,6 +753,63 @@ void Heap::Scavenge() {
     }
   }

+  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+
+  ScavengeExternalStringTable();
+
+  ASSERT(new_space_front == new_space_.top());
+
+  // Set age mark.
+  new_space_.set_age_mark(new_space_.top());
+
+  // Update how much has survived scavenge.
+  survived_since_last_expansion_ +=
+      (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
+
+  LOG(ResourceEvent("scavenge", "end"));
+
+  gc_state_ = NOT_IN_GC;
+}
+
+
+void Heap::ScavengeExternalStringTable() {
+  ExternalStringTable::Verify();
+
+  if (ExternalStringTable::new_space_strings_.is_empty()) return;
+
+  Object** start = &ExternalStringTable::new_space_strings_[0];
+  Object** end = start + ExternalStringTable::new_space_strings_.length();
+  Object** last = start;
+
+  for (Object** p = start; p < end; ++p) {
+    ASSERT(Heap::InFromSpace(*p));
+    MapWord first_word = HeapObject::cast(*p)->map_word();
+
+    if (!first_word.IsForwardingAddress()) {
+      // Unreachable external string can be finalized.
+      FinalizeExternalString(String::cast(*p));
+      continue;
+    }
+
+    // String is still reachable.
+    String* target = String::cast(first_word.ToForwardingAddress());
+    ASSERT(target->IsExternalString());
+
+    if (Heap::InNewSpace(target)) {
+      // String is still in new space.  Update the table entry.
+      *last = target;
+      ++last;
+    } else {
+      // String got promoted.  Move it to the old string list.
+      ExternalStringTable::AddOldString(target);
+    }
+  }
+
+  ExternalStringTable::ShrinkNewStrings(last - start);
+}
+
+
+Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
+                         Address new_space_front) {
   do {
     ASSERT(new_space_front <= new_space_.top());

@@ -761,7 +818,7 @@ void Heap::Scavenge() {
     // queue is empty.
     while (new_space_front < new_space_.top()) {
       HeapObject* object = HeapObject::FromAddress(new_space_front);
-      object->Iterate(&scavenge_visitor);
+      object->Iterate(scavenge_visitor);
       new_space_front += object->Size();
     }

@@ -783,7 +840,7 @@ void Heap::Scavenge() {
       RecordCopiedObject(target);
 #endif
       // Visit the newly copied object for pointers to new space.
-      target->Iterate(&scavenge_visitor);
+      target->Iterate(scavenge_visitor);
       UpdateRSet(target);
     }

@@ -791,16 +848,7 @@ void Heap::Scavenge() {
     // (there are currently no more unswept promoted objects).
   } while (new_space_front < new_space_.top());

-  // Set age mark.
-  new_space_.set_age_mark(new_space_.top());
-
-  // Update how much has survived scavenge.
-  survived_since_last_expansion_ +=
-      (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
-
-  LOG(ResourceEvent("scavenge", "end"));
-
-  gc_state_ = NOT_IN_GC;
+  return new_space_front;
 }
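ScavengeExternalStringTable above (and CleanUp later in this file) uses a read pointer and a write pointer to filter the list in place, then shrinks it. A standalone demo of that compaction idiom:

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> strings = {1, -2, 3, -4, 5};  // negative = "unreachable"
      size_t last = 0;
      for (size_t i = 0; i < strings.size(); ++i) {
        if (strings[i] < 0) continue;     // finalize and drop dead entries
        strings[last++] = strings[i];     // keep survivors, compacted left
      }
      strings.resize(last);               // ShrinkNewStrings analog
      for (int s : strings) std::printf("%d ", s);
      std::printf("\n");
    }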
@@ -1094,6 +1142,13 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
   map->set_bit_field2(0);
+
+  // If the map object is aligned fill the padding area with Smi 0 objects.
+  if (Map::kPadStart < Map::kSize) {
+    memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
+           0,
+           Map::kSize - Map::kPadStart);
+  }
   return map;
 }
@@ -2185,8 +2240,11 @@ Object* Heap::AllocateFunctionPrototype(JSFunction* function) {
 Object* Heap::AllocateFunction(Map* function_map,
                                SharedFunctionInfo* shared,
-                               Object* prototype) {
-  Object* result = Allocate(function_map, OLD_POINTER_SPACE);
+                               Object* prototype,
+                               PretenureFlag pretenure) {
+  AllocationSpace space =
+      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
+  Object* result = Allocate(function_map, space);
   if (result->IsFailure()) return result;
   return InitializeFunction(JSFunction::cast(result), shared, prototype);
 }
@@ -2203,10 +2261,14 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
   JSObject* boilerplate =
       Top::context()->global_context()->arguments_boilerplate();

-  // Make the clone.
-  Map* map = boilerplate->map();
-  int object_size = map->instance_size();
-  Object* result = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
+  // Check that the size of the boilerplate matches our
+  // expectations.  The ArgumentsAccessStub::GenerateNewObject relies
+  // on the size being a known constant.
+  ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
+
+  // Do the allocation.
+  Object* result =
+      AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
   if (result->IsFailure()) return result;

   // Copy the content. The arguments boilerplate doesn't have any
@@ -2214,7 +2276,7 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
   // barrier here.
   CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
             reinterpret_cast<Object**>(boilerplate->address()),
-            object_size);
+            kArgumentsObjectSize);

   // Set the two properties.
   JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
@@ -3175,6 +3237,11 @@ void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
   IterateStrongRoots(v, mode);
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
   v->Synchronize("symbol_table");
+
+  if (mode != VISIT_ALL_IN_SCAVENGE) {
+    // Scavenge collections have special processing for this.
+    ExternalStringTable::Iterate(v);
+  }
+  v->Synchronize("external_string_table");
 }
@@ -3203,11 +3270,12 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   HandleScopeImplementer::Iterate(v);
   v->Synchronize("handlescope");

-  // Iterate over the builtin code objects and code stubs in the heap. Note
-  // that it is not strictly necessary to iterate over code objects on
-  // scavenge collections.  We still do it here because this same function
-  // is used by the mark-sweep collector and the deserializer.
-  Builtins::IterateBuiltins(v);
+  // Iterate over the builtin code objects and code stubs in the
+  // heap.  Note that it is not necessary to iterate over code objects
+  // on scavenge collections.
+  if (mode != VISIT_ALL_IN_SCAVENGE) {
+    Builtins::IterateBuiltins(v);
+  }
   v->Synchronize("builtins");

   // Iterate over global handles.
@@ -3424,6 +3492,8 @@ void Heap::SetStackLimits() {
 void Heap::TearDown() {
   GlobalHandles::TearDown();

+  ExternalStringTable::TearDown();
+
   new_space_.TearDown();

   if (old_pointer_space_ != NULL) {
@@ -3839,8 +3909,8 @@ class MarkRootVisitor: public ObjectVisitor {
 // Triggers a depth-first traversal of reachable objects from roots
 // and finds a path to a specific heap object and prints it.
-void Heap::TracePathToObject() {
-  search_target = NULL;
+void Heap::TracePathToObject(Object* target) {
+  search_target = target;
   search_for_any_global = false;

   MarkRootVisitor root_visitor;
@@ -3907,8 +3977,8 @@ const char* GCTracer::CollectorString() {
 int KeyedLookupCache::Hash(Map* map, String* name) {
   // Uses only lower 32 bits if pointers are larger.
   uintptr_t addr_hash =
-      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> 2;
-  return (addr_hash ^ name->Hash()) % kLength;
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
+  return (addr_hash ^ name->Hash()) & kCapacityMask;
 }
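The bucket selection switches from a modulo to a bitwise AND, which is valid because kLength is a power of two (the new named constants also let generated code perform the same lookup). A standalone demo:

    #include <cassert>
    #include <cstdio>

    int main() {
      const unsigned kLength = 64;             // power of two
      const unsigned kCapacityMask = kLength - 1;
      for (unsigned hash = 0; hash < 1000; ++hash)
        assert((hash & kCapacityMask) == (hash % kLength));
      std::printf("mask and modulo agree for power-of-two table sizes\n");
    }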
@@ -3991,4 +4061,35 @@ void TranscendentalCache::Clear() {
 }

+void ExternalStringTable::CleanUp() {
+  int last = 0;
+  for (int i = 0; i < new_space_strings_.length(); ++i) {
+    if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
+    if (Heap::InNewSpace(new_space_strings_[i])) {
+      new_space_strings_[last++] = new_space_strings_[i];
+    } else {
+      old_space_strings_.Add(new_space_strings_[i]);
+    }
+  }
+  new_space_strings_.Rewind(last);
+
+  last = 0;
+  for (int i = 0; i < old_space_strings_.length(); ++i) {
+    if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
+    ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
+    old_space_strings_[last++] = old_space_strings_[i];
+  }
+  old_space_strings_.Rewind(last);
+  Verify();
+}
+
+
+void ExternalStringTable::TearDown() {
+  new_space_strings_.Free();
+  old_space_strings_.Free();
+}
+
+
+List<Object*> ExternalStringTable::new_space_strings_;
+List<Object*> ExternalStringTable::old_space_strings_;
+
 } }  // namespace v8::internal

69
deps/v8/src/heap.h

@@ -487,9 +487,12 @@ class Heap : public AllStatic {
   // Please note this does not perform a garbage collection.
   static Object* AllocateFunction(Map* function_map,
                                   SharedFunctionInfo* shared,
-                                  Object* prototype);
+                                  Object* prototype,
+                                  PretenureFlag pretenure = TENURED);

   // Indices for direct access into argument objects.
+  static const int kArgumentsObjectSize =
+      JSObject::kHeaderSize + 2 * kPointerSize;
   static const int arguments_callee_index = 0;
   static const int arguments_length_index = 1;
@@ -566,6 +569,10 @@ class Heap : public AllStatic {
   static Object* AllocateExternalStringFromTwoByte(
       ExternalTwoByteString::Resource* resource);

+  // Finalizes an external string by deleting the associated external
+  // data and clearing the resource pointer.
+  static inline void FinalizeExternalString(String* string);
+
   // Allocates an uninitialized object. The memory is non-executable if the
   // hardware and OS allow.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -778,7 +785,7 @@ class Heap : public AllStatic {
     return disallow_allocation_failure_;
   }

-  static void TracePathToObject();
+  static void TracePathToObject(Object* target);
   static void TracePathToGlobal();
 #endif
@@ -886,7 +893,7 @@ class Heap : public AllStatic {
   // The number of MapSpace pages is limited by the way we pack
   // Map pointers during GC.
   static const int kMaxMapSpaceSize =
-      (1 << MapWord::kMapPageIndexBits) * Page::kPageSize;
+      (1 << (MapWord::kMapPageIndexBits)) * Page::kPageSize;

 #if defined(V8_TARGET_ARCH_X64)
   static const int kMaxObjectSizeInNewSpace = 512*KB;
@@ -1039,6 +1046,9 @@ class Heap : public AllStatic {
   // Performs a minor collection in new generation.
   static void Scavenge();
+  static void ScavengeExternalStringTable();
+  static Address DoScavenge(ObjectVisitor* scavenge_visitor,
+                            Address new_space_front);

   // Performs a major collection in the whole heap.
   static void MarkCompact(GCTracer* tracer);
@@ -1293,17 +1303,33 @@ class KeyedLookupCache {
   // Clear the cache.
   static void Clear();

+  static const int kLength = 64;
+  static const int kCapacityMask = kLength - 1;
+  static const int kMapHashShift = 2;
+
  private:
   static inline int Hash(Map* map, String* name);
-  static const int kLength = 64;
+
+  // Get the address of the keys and field_offsets arrays.  Used in
+  // generated code to perform cache lookups.
+  static Address keys_address() {
+    return reinterpret_cast<Address>(&keys_);
+  }
+
+  static Address field_offsets_address() {
+    return reinterpret_cast<Address>(&field_offsets_);
+  }
+
   struct Key {
     Map* map;
     String* name;
   };
+
   static Key keys_[kLength];
   static int field_offsets_[kLength];
-};
+
+  friend class ExternalReference;
+};
// Cache for mapping (array, property name) into descriptor index. // Cache for mapping (array, property name) into descriptor index.
@ -1623,6 +1649,39 @@ class TranscendentalCache {
}; };
// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
// finalize them.
class ExternalStringTable : public AllStatic {
public:
// Registers an external string.
inline static void AddString(String* string);
inline static void Iterate(ObjectVisitor* v);
// Restores internal invariant and gets rid of collected strings.
// Must be called after each Iterate() that modified the strings.
static void CleanUp();
// Destroys all allocated memory.
static void TearDown();
private:
friend class Heap;
inline static void Verify();
inline static void AddOldString(String* string);
// Notifies the table that only a prefix of the new list is valid.
inline static void ShrinkNewStrings(int position);
// To speed up scavenge collections new space string are kept
// separate from old space strings.
static List<Object*> new_space_strings_;
static List<Object*> old_space_strings_;
};
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_HEAP_H_ #endif // V8_HEAP_H_

11
deps/v8/src/ia32/assembler-ia32.cc

@@ -2004,6 +2004,17 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
 }

+void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x57);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::comisd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
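The new xorpd emitter is used later in this commit mainly for the xorpd xmm0, xmm0 zeroing idiom (loading 0.0 without a memory operand). A standalone sketch of the bytes it produces, assuming the usual register-register ModR/M encoding:

    #include <cstdio>

    int main() {
      int dst = 0, src = 0;  // xmm0, xmm0
      unsigned char code[4] = {
        0x66, 0x0F, 0x57,
        (unsigned char)(0xC0 | (dst << 3) | src)  // ModR/M: reg, reg
      };
      for (unsigned char b : code) std::printf("%02X ", b);
      std::printf("  ; xorpd xmm%d, xmm%d -> 0.0\n", dst, src);
    }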

1
deps/v8/src/ia32/assembler-ia32.h

@@ -745,6 +745,7 @@ class Assembler : public Malloced {
   void subsd(XMMRegister dst, XMMRegister src);
   void mulsd(XMMRegister dst, XMMRegister src);
   void divsd(XMMRegister dst, XMMRegister src);
+  void xorpd(XMMRegister dst, XMMRegister src);

   void comisd(XMMRegister dst, XMMRegister src);

47
deps/v8/src/ia32/builtins-ia32.cc

@@ -472,35 +472,38 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     __ bind(&done);
   }

-  // 4. Shift stuff one slot down the stack.
+  // 4. Check that the function really is a function.
+  { Label done;
+    __ test(edi, Operand(edi));
+    __ j(not_zero, &done, taken);
+    __ xor_(ebx, Operand(ebx));
+    // CALL_NON_FUNCTION will expect to find the non-function callee on the
+    // expression stack of the caller.  Transfer it from receiver to the
+    // caller's expression stack (and make the first argument the receiver
+    // for CALL_NON_FUNCTION) by decrementing the argument count.
+    __ dec(eax);
+    __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+    __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+           RelocInfo::CODE_TARGET);
+    __ bind(&done);
+  }
+
+  // 5. Shift arguments and return address one slot down on the stack
+  //    (overwriting the receiver).
   { Label loop;
-    __ lea(ecx, Operand(eax, +1));  // +1 ~ copy receiver too
+    __ mov(ecx, eax);
     __ bind(&loop);
     __ mov(ebx, Operand(esp, ecx, times_4, 0));
     __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
     __ dec(ecx);
-    __ j(not_zero, &loop);
+    __ j(not_sign, &loop);
+    __ pop(ebx);  // Discard copy of return address.
+    __ dec(eax);  // One fewer argument (first argument is new receiver).
   }

-  // 5. Remove TOS (copy of last arguments), but keep return address.
-  __ pop(ebx);
-  __ pop(ecx);
-  __ push(ebx);
-  __ dec(eax);
-
-  // 6. Check that function really was a function and get the code to
-  //    call from the function and check that the number of expected
-  //    arguments matches what we're providing.
-  { Label invoke;
-    __ test(edi, Operand(edi));
-    __ j(not_zero, &invoke, taken);
-    __ xor_(ebx, Operand(ebx));
-    __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
-    __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
-           RelocInfo::CODE_TARGET);
-
-    __ bind(&invoke);
-    __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  // 6. Get the code to call from the function and check that the number of
+  //    expected arguments matches what we're providing.
+  { __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
     __ mov(ebx,
            FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
     __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));

458
deps/v8/src/ia32/codegen-ia32.cc

@@ -174,12 +174,19 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
     function_return_is_shadowed_ = false;

     // Allocate the local context if needed.
-    if (scope_->num_heap_slots() > 0) {
+    int heap_slots = scope_->num_heap_slots();
+    if (heap_slots > 0) {
       Comment cmnt(masm_, "[ allocate local context");
       // Allocate local context.
       // Get outer context and create a new context based on it.
       frame_->PushFunction();
-      Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
+      Result context;
+      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+        FastNewContextStub stub(heap_slots);
+        context = frame_->CallStub(&stub, 1);
+      } else {
+        context = frame_->CallRuntime(Runtime::kNewContext, 1);
+      }

       // Update context local.
       frame_->SaveContextRegister();
@@ -763,19 +770,27 @@ class FloatingPointHelper : public AllStatic {
 const char* GenericBinaryOpStub::GetName() {
-  switch (op_) {
-    case Token::ADD: return "GenericBinaryOpStub_ADD";
-    case Token::SUB: return "GenericBinaryOpStub_SUB";
-    case Token::MUL: return "GenericBinaryOpStub_MUL";
-    case Token::DIV: return "GenericBinaryOpStub_DIV";
-    case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
-    case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
-    case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
-    case Token::SAR: return "GenericBinaryOpStub_SAR";
-    case Token::SHL: return "GenericBinaryOpStub_SHL";
-    case Token::SHR: return "GenericBinaryOpStub_SHR";
-    default: return "GenericBinaryOpStub";
-  }
+  if (name_ != NULL) return name_;
+  const int len = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(len);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name;
+  switch (mode_) {
+    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+    default: overwrite_name = "UnknownOverwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, len),
+               "GenericBinaryOpStub_%s_%s%s_%s%s",
+               op_name,
+               overwrite_name,
+               (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
+               args_in_registers_ ? "RegArgs" : "StackArgs",
+               args_reversed_ ? "_R" : "");
+  return name_;
 }
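GetName now composes one name out of the operation, overwrite mode, and argument-passing flags instead of a fixed string per token. A standalone sketch of the kind of name the format string yields (std::snprintf standing in for OS::SNPrintF):

    #include <cstdio>

    int main() {
      char name[100];
      const char* op_name = "ADD";
      const char* overwrite_name = "OverwriteLeft";
      bool no_smi_code = false, args_in_registers = true, args_reversed = false;
      std::snprintf(name, sizeof(name), "GenericBinaryOpStub_%s_%s%s_%s%s",
                    op_name, overwrite_name,
                    no_smi_code ? "_NoSmiInStub" : "",
                    args_in_registers ? "RegArgs" : "StackArgs",
                    args_reversed ? "_R" : "");
      std::printf("%s\n", name);  // GenericBinaryOpStub_ADD_OverwriteLeft_RegArgs
    }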
@@ -803,14 +818,88 @@ class DeferredInlineBinaryOperation: public DeferredCode {
 void DeferredInlineBinaryOperation::Generate() {
+  Label done;
+  if (CpuFeatures::IsSupported(SSE2) && ((op_ == Token::ADD) ||
+                                         (op_ == Token::SUB) ||
+                                         (op_ == Token::MUL) ||
+                                         (op_ == Token::DIV))) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    Label call_runtime, after_alloc_failure;
+    Label left_smi, right_smi, load_right, do_op;
+    __ test(left_, Immediate(kSmiTagMask));
+    __ j(zero, &left_smi);
+    __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
+           Factory::heap_number_map());
+    __ j(not_equal, &call_runtime);
+    __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
+    if (mode_ == OVERWRITE_LEFT) {
+      __ mov(dst_, left_);
+    }
+    __ jmp(&load_right);
+
+    __ bind(&left_smi);
+    __ sar(left_, 1);
+    __ cvtsi2sd(xmm0, Operand(left_));
+    __ shl(left_, 1);
+    if (mode_ == OVERWRITE_LEFT) {
+      Label alloc_failure;
+      __ push(left_);
+      __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
+      __ pop(left_);
+    }
+
+    __ bind(&load_right);
+    __ test(right_, Immediate(kSmiTagMask));
+    __ j(zero, &right_smi);
+    __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
+           Factory::heap_number_map());
+    __ j(not_equal, &call_runtime);
+    __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
+    if (mode_ == OVERWRITE_RIGHT) {
+      __ mov(dst_, right_);
+    } else if (mode_ == NO_OVERWRITE) {
+      Label alloc_failure;
+      __ push(left_);
+      __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
+      __ pop(left_);
+    }
+    __ jmp(&do_op);
+
+    __ bind(&right_smi);
+    __ sar(right_, 1);
+    __ cvtsi2sd(xmm1, Operand(right_));
+    __ shl(right_, 1);
+    if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
+      Label alloc_failure;
+      __ push(left_);
+      __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
+      __ pop(left_);
+    }
+
+    __ bind(&do_op);
+    switch (op_) {
+      case Token::ADD: __ addsd(xmm0, xmm1); break;
+      case Token::SUB: __ subsd(xmm0, xmm1); break;
+      case Token::MUL: __ mulsd(xmm0, xmm1); break;
+      case Token::DIV: __ divsd(xmm0, xmm1); break;
+      default: UNREACHABLE();
+    }
+    __ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
+    __ jmp(&done);
+
+    __ bind(&after_alloc_failure);
+    __ pop(left_);
+    __ bind(&call_runtime);
+  }
   GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
   stub.GenerateCall(masm_, left_, right_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
+  __ bind(&done);
 }

 void CodeGenerator::GenericBinaryOperation(Token::Value op,
-                                           SmiAnalysis* type,
+                                           StaticType* type,
                                            OverwriteMode overwrite_mode) {
   Comment cmnt(masm_, "[ BinaryOperation");
   Comment cmnt_token(masm_, Token::String(op));
@@ -1491,7 +1580,7 @@ void DeferredInlineSmiSub::Generate() {
 void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
                                                Result* operand,
                                                Handle<Object> value,
-                                               SmiAnalysis* type,
+                                               StaticType* type,
                                                bool reversed,
                                                OverwriteMode overwrite_mode) {
   // NOTE: This is an attempt to inline (a bit) more of the code for
@@ -1776,7 +1865,8 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
}
-void CodeGenerator::Comparison(Condition cc,
+void CodeGenerator::Comparison(AstNode* node,
+Condition cc,
bool strict,
ControlDestination* dest) {
// Strict only makes sense for equality comparisons.
@@ -1823,7 +1913,8 @@ void CodeGenerator::Comparison(Condition cc,
default:
UNREACHABLE();
}
-} else {  // Only one side is a constant Smi.
+} else {
+// Only one side is a constant Smi.
// If left side is a constant Smi, reverse the operands.
// Since one side is a constant Smi, conversion order does not matter.
if (left_side_constant_smi) {
@@ -1837,6 +1928,8 @@ void CodeGenerator::Comparison(Condition cc,
// Implement comparison against a constant Smi, inlining the case
// where both sides are Smis.
left_side.ToRegister();
+Register left_reg = left_side.reg();
+Handle<Object> right_val = right_side.handle();
// Here we split control flow to the stub call and inlined cases
// before finally splitting it to the control destination. We use
@@ -1844,11 +1937,50 @@ void CodeGenerator::Comparison(Condition cc,
// the first split. We manually handle the off-frame references
// by reconstituting them on the non-fall-through path.
JumpTarget is_smi;
-Register left_reg = left_side.reg();
-Handle<Object> right_val = right_side.handle();
__ test(left_side.reg(), Immediate(kSmiTagMask));
is_smi.Branch(zero, taken);
bool is_for_loop_compare = (node->AsCompareOperation() != NULL)
&& node->AsCompareOperation()->is_for_loop_condition();
if (!is_for_loop_compare
&& CpuFeatures::IsSupported(SSE2)
&& right_val->IsSmi()) {
// Right side is a constant smi and left side has been checked
// not to be a smi.
CpuFeatures::Scope use_sse2(SSE2);
JumpTarget not_number;
__ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
not_number.Branch(not_equal, &left_side);
__ movdbl(xmm1,
FieldOperand(left_reg, HeapNumber::kValueOffset));
int value = Smi::cast(*right_val)->value();
if (value == 0) {
__ xorpd(xmm0, xmm0);
} else {
Result temp = allocator()->Allocate();
__ mov(temp.reg(), Immediate(value));
__ cvtsi2sd(xmm0, Operand(temp.reg()));
temp.Unuse();
}
__ comisd(xmm1, xmm0);
// Jump to builtin for NaN.
not_number.Branch(parity_even, &left_side);
left_side.Unuse();
Condition double_cc = cc;
switch (cc) {
case less: double_cc = below; break;
case equal: double_cc = equal; break;
case less_equal: double_cc = below_equal; break;
case greater: double_cc = above; break;
case greater_equal: double_cc = above_equal; break;
default: UNREACHABLE();
}
dest->true_target()->Branch(double_cc);
dest->false_target()->Jump();
not_number.Bind(&left_side);
}
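
Why the switch above maps signed condition codes to unsigned ones, and why the code branches on parity_even to the stub: comisd sets CF/ZF the way an unsigned integer compare would, and sets PF for an unordered (NaN) result. A host-side sketch of that flag logic, not V8 source:

#include <cmath>

// Models the inlined double compare: reports fallback (comisd sets PF,
// and the code above branches on parity_even to the generic stub) when
// either operand is NaN.
bool InlineDoubleLess(double left, double right, bool* unordered) {
  *unordered = std::isnan(left) || std::isnan(right);
  // 'less' becomes 'below' because comisd reports "left < right" via
  // the carry flag, exactly as an unsigned compare would.
  return !*unordered && left < right;
}
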
// Setup and call the compare stub.
CompareStub stub(cc, strict);
Result result = frame_->CallStub(&stub, &left_side, &right_side);
@@ -1872,6 +2004,7 @@ void CodeGenerator::Comparison(Condition cc,
right_side.Unuse();
dest->Split(cc);
}
} else if (cc == equal &&
(left_side_constant_null || right_side_constant_null)) {
// To make null checks efficient, we check if either the left side or
@@ -1908,7 +2041,8 @@ void CodeGenerator::Comparison(Condition cc,
operand.Unuse();
dest->Split(not_zero);
}
-} else {  // Neither side is a constant Smi or null.
+} else {
+// Neither side is a constant Smi or null.
// If either side is a non-smi constant, skip the smi check.
bool known_non_smi =
(left_side.is_constant() && !left_side.handle()->IsSmi()) ||
@@ -2575,7 +2709,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
// Compare and branch to the body if true or the next test if
// false. Prefer the next test as a fall through.
ControlDestination dest(clause->body_target(), &next_test, false);
-Comparison(equal, true, &dest);
+Comparison(node, equal, true, &dest);
// If the comparison fell through to the true target, jump to the
// actual body.
@@ -3585,18 +3719,28 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
-// Call the runtime to instantiate the function boilerplate object.
-// The inevitable call will sync frame elements to memory anyway, so
-// we do it eagerly to allow us to push the arguments directly into
-// place.
ASSERT(boilerplate->IsBoilerplate());
-frame_->SyncRange(0, frame_->element_count() - 1);
-// Create a new closure.
-frame_->EmitPush(esi);
-frame_->EmitPush(Immediate(boilerplate));
-Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
-frame_->Push(&result);
+// Use the fast case closure allocation code that allocates in new
+// space for nested functions that don't need literals cloning.
+if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
+FastNewClosureStub stub;
+frame_->Push(boilerplate);
Result answer = frame_->CallStub(&stub, 1);
frame_->Push(&answer);
} else {
// Call the runtime to instantiate the function boilerplate
// object. The inevitable call will sync frame elements to memory
// anyway, so we do it eagerly to allow us to push the arguments
// directly into place.
frame_->SyncRange(0, frame_->element_count() - 1);
// Create a new closure.
frame_->EmitPush(esi);
frame_->EmitPush(Immediate(boilerplate));
Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
frame_->Push(&result);
}
}
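
A small host-side sketch (hypothetical helper, not part of the diff) of the predicate the rewritten InstantiateBoilerplate applies: only function-scope closures whose boilerplate carries no literals may take the FastNewClosureStub path, because the stub copies the JSFunction but never clones a literals array.

// Hypothetical helper mirroring the condition in the code above.
bool CanUseFastNewClosureStub(bool is_function_scope, int number_of_literals) {
  return is_function_scope && number_of_literals == 0;
}
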
@@ -4295,18 +4439,23 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
// Push the resulting array literal boilerplate on the stack.
frame_->Push(&boilerplate);
// Clone the boilerplate object.
-Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
-if (node->depth() == 1) {
-clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+int length = node->values()->length();
+Result clone;
+if (node->depth() == 1 &&
+length <= FastCloneShallowArrayStub::kMaximumLength) {
+FastCloneShallowArrayStub stub(length);
+clone = frame_->CallStub(&stub, 1);
+} else {
+clone = frame_->CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
}
-Result clone = frame_->CallRuntime(clone_function_id, 1);
// Push the newly cloned literal object as the result.
frame_->Push(&clone);
// Generate code to set the elements in the array that are not
// literals.
-for (int i = 0; i < node->values()->length(); i++) {
+for (int i = 0; i < length; i++) {
Expression* value = node->values()->at(i);
// If value is a literal the property value is already set in the
@@ -4535,9 +4684,6 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'foo(1, 2, 3)'  // foo is global
// ----------------------------------
-// Push the name of the function and the receiver onto the stack.
-frame_->Push(var->name());
// Pass the global object as the receiver and let the IC stub
// patch the stack to use the global proxy as 'this' in the
// invoked function.
@@ -4549,14 +4695,16 @@ void CodeGenerator::VisitCall(Call* node) {
Load(args->at(i));
}
+// Push the name of the function onto the frame.
+frame_->Push(var->name());
// Call the IC initialization code.
CodeForSourcePosition(node->position());
Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
arg_count,
loop_nesting());
frame_->RestoreContextRegister();
-// Replace the function on the stack with the result.
-frame_->SetElementAt(0, &result);
+frame_->Push(&result);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
@@ -4609,8 +4757,7 @@ void CodeGenerator::VisitCall(Call* node) {
node->position());
} else {
-// Push the name of the function and the receiver onto the stack.
-frame_->Push(name);
+// Push the receiver onto the frame.
Load(property->obj());
// Load the arguments.
@@ -4619,14 +4766,16 @@ void CodeGenerator::VisitCall(Call* node) {
Load(args->at(i));
}
+// Push the name of the function onto the frame.
+frame_->Push(name);
// Call the IC initialization code.
CodeForSourcePosition(node->position());
Result result =
frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
loop_nesting());
frame_->RestoreContextRegister();
-// Replace the function on the stack with the result.
-frame_->SetElementAt(0, &result);
+frame_->Push(&result);
}
} else {
@@ -5284,8 +5433,6 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
Runtime::Function* function = node->function();
if (function == NULL) {
-// Prepare stack for calling JS runtime function.
-frame_->Push(node->name());
// Push the builtins object found in the current global object.
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
@@ -5302,11 +5449,12 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (function == NULL) {
// Call the JS runtime function.
+frame_->Push(node->name());
Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
arg_count,
loop_nesting_);
frame_->RestoreContextRegister();
-frame_->SetElementAt(0, &answer);
+frame_->Push(&answer);
} else {
// Call the C runtime function.
Result answer = frame_->CallRuntime(function, arg_count);
@@ -5974,7 +6122,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
Load(left);
Load(right);
-Comparison(cc, strict, destination());
+Comparison(node, cc, strict, destination());
}
@@ -6428,7 +6576,7 @@ void Reference::SetValue(InitState init_state) {
// a loop and the key is likely to be a smi.
Property* property = expression()->AsProperty();
ASSERT(property != NULL);
-SmiAnalysis* key_smi_analysis = property->key()->type();
+StaticType* key_smi_analysis = property->key()->type();
if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
Comment cmnt(masm, "[ Inlined store to keyed Property");
@@ -6529,6 +6677,133 @@ void Reference::SetValue(InitState init_state) {
}
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Clone the boilerplate in new space. Set the context to the
// current context in esi.
Label gc;
__ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
// Get the boilerplate function from the stack.
__ mov(edx, Operand(esp, 1 * kPointerSize));
// Compute the function map in the current global context and set that
// as the map of the allocated object.
__ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
__ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
__ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
// Clone the rest of the boilerplate fields. We don't have to update
// the write barrier because the allocated object is in new space.
for (int offset = kPointerSize;
offset < JSFunction::kSize;
offset += kPointerSize) {
if (offset == JSFunction::kContextOffset) {
__ mov(FieldOperand(eax, offset), esi);
} else {
__ mov(ebx, FieldOperand(edx, offset));
__ mov(FieldOperand(eax, offset), ebx);
}
}
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ pop(ecx); // Temporarily remove return address.
__ pop(edx);
__ push(esi);
__ push(edx);
__ push(ecx); // Restore return address.
__ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
}
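
A host-side model (not V8 source; word indices are illustrative) of the copy loop in FastNewClosureStub::Generate: after the map word is written, every remaining pointer-size field of the boilerplate is copied verbatim except the context slot, which is set to the current context. No write barrier is needed because the clone lives in new space.

#include <cstdint>

// Objects modeled as arrays of 32-bit words (ia32 pointer size).
void CloneClosureFields(const uint32_t* boilerplate,
                        uint32_t* clone,
                        uint32_t current_context,
                        int size_in_words,
                        int context_index) {
  // Word 0 (the map) is assumed stored already, as in the stub above;
  // copy the rest, patching only the context field.
  for (int i = 1; i < size_in_words; i++) {
    clone[i] = (i == context_index) ? current_context : boilerplate[i];
  }
}
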
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
__ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
eax, ebx, ecx, &gc, TAG_OBJECT);
// Get the function from the stack.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
// Setup the object header.
__ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
__ mov(FieldOperand(eax, Array::kLengthOffset), Immediate(length));
// Setup the fixed slots.
__ xor_(ebx, Operand(ebx)); // Set to NULL.
__ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
__ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
__ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
__ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
// Copy the global object from the surrounding context.
__ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
// Initialize the rest of the slots to undefined.
__ mov(ebx, Factory::undefined_value());
for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
__ mov(Operand(eax, Context::SlotOffset(i)), ebx);
}
// Return and remove the on-stack parameter.
__ mov(esi, Operand(eax));
__ ret(1 * kPointerSize);
// Need to collect. Call into runtime system.
__ bind(&gc);
__ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
}
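
The size computed at the top of FastNewContextStub follows directly from the layout set up above: a FixedArray header plus the fixed slots (closure, fcontext, previous, extension, global) plus the user slots. A sketch with illustrative constants, not V8 source:

const int kPointerSize = 4;                          // ia32
const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length words
const int kMinContextSlots = 5;  // Assumed: the five fixed slots stored above.

int ContextAllocationSize(int user_slots) {
  int length = user_slots + kMinContextSlots;
  return length * kPointerSize + kFixedArrayHeaderSize;
}
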
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
int size = JSArray::kSize + elements_size;
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
Label gc;
__ AllocateInNewSpace(size, eax, ebx, ecx, &gc, TAG_OBJECT);
// Get the boilerplate from the stack.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
__ mov(ebx, FieldOperand(ecx, i));
__ mov(FieldOperand(eax, i), ebx);
}
}
if (length_ > 0) {
// Get hold of the elements array of the boilerplate and setup the
// elements pointer in the resulting object.
__ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
__ lea(edx, Operand(eax, JSArray::kSize));
__ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
// Copy the elements array.
for (int i = 0; i < elements_size; i += kPointerSize) {
__ mov(ebx, FieldOperand(ecx, i));
__ mov(FieldOperand(edx, i), ebx);
}
}
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
__ bind(&gc);
ExternalReference runtime(Runtime::kCloneShallowLiteralBoilerplate);
__ TailCallRuntime(runtime, 1, 1);
}
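
A sketch (illustrative constants, not V8 source) of the size computation at the top of FastCloneShallowArrayStub: the JSArray header and its elements FixedArray are carved out of one new-space allocation, which is why only a single allocation-limit check is needed.

const int kPointerSize = 4;                          // ia32
const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
const int kJSArraySize = 7 * kPointerSize;           // Illustrative only.

int FixedArraySizeFor(int length) {
  return kFixedArrayHeaderSize + length * kPointerSize;
}

int ShallowArrayCloneSize(int length) {
  int elements_size = (length > 0) ? FixedArraySizeFor(length) : 0;
  return kJSArraySize + elements_size;
}
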
// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result, true_result, not_string;
@@ -7441,18 +7716,90 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
static const int kDisplacement = 2 * kPointerSize;
// Check if the calling frame is an arguments adaptor frame.
-Label runtime;
+Label adaptor_frame, try_allocate, runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
__ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-__ j(not_equal, &runtime);
+__ j(equal, &adaptor_frame);
+// Get the length from the frame.
+__ mov(ecx, Operand(esp, 1 * kPointerSize));
+__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
+__ bind(&adaptor_frame);
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ mov(Operand(esp, 1 * kPointerSize), ecx);
__ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
__ mov(Operand(esp, 2 * kPointerSize), edx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
__ test(ecx, Operand(ecx));
__ j(zero, &add_arguments_object);
__ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
__ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize));
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
// Get the arguments boilerplate from the current (global) context.
int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
__ mov(edi, Operand(edi, offset));
// Copy the JS object part.
for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
__ mov(ebx, FieldOperand(edi, i));
__ mov(FieldOperand(eax, i), ebx);
}
// Setup the callee in-object property.
ASSERT(Heap::arguments_callee_index == 0);
__ mov(ebx, Operand(esp, 3 * kPointerSize));
__ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
// Get the length (smi tagged) and set that as an in-object property too.
ASSERT(Heap::arguments_length_index == 1);
__ mov(ecx, Operand(esp, 1 * kPointerSize));
__ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
// If there are no actual arguments, we're done.
Label done;
__ test(ecx, Operand(ecx));
__ j(zero, &done);
// Get the parameters pointer from the stack and untag the length.
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ sar(ecx, kSmiTagSize);
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(Factory::fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
// Copy the fixed array slots.
Label loop;
__ bind(&loop);
__ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
__ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
__ add(Operand(edi), Immediate(kPointerSize));
__ sub(Operand(edx), Immediate(kPointerSize));
__ dec(ecx);
__ test(ecx, Operand(ecx));
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
__ bind(&done);
__ ret(3 * kPointerSize);
// Do the runtime call to allocate the arguments object. // Do the runtime call to allocate the arguments object.
__ bind(&runtime); __ bind(&runtime);
__ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1); __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
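
The allocation size built up in ecx before AllocateInNewSpace above can be read as follows (sketch with an illustrative object size, not V8 source). Note that the length in ecx is smi-tagged, so the times_2 scale in the lea is really count * kPointerSize on ia32.

const int kPointerSize = 4;                           // ia32
const int kFixedArrayHeaderSize = 2 * kPointerSize;   // map + length
const int kArgumentsObjectSize = 9 * kPointerSize;    // Illustrative only.

int ArgumentsAllocationSize(int argc) {
  if (argc == 0) return kArgumentsObjectSize;   // No elements array needed.
  return kArgumentsObjectSize + kFixedArrayHeaderSize + argc * kPointerSize;
}
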
@@ -8306,6 +8653,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
__ and_(ecx, Operand(edi));
+ASSERT(kStringEncodingMask == kAsciiStringTag);
__ test(ecx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii);
// Allocate an ascii cons string.
@@ -8348,7 +8696,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Label non_ascii_string_add_flat_result;
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-ASSERT(kAsciiStringTag != 0);
+ASSERT(kStringEncodingMask == kAsciiStringTag);
__ test(ecx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii_string_add_flat_result);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));

25
deps/v8/src/ia32/codegen-ia32.h

@@ -434,7 +434,7 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(
Token::Value op,
-SmiAnalysis* type,
+StaticType* type,
OverwriteMode overwrite_mode);
// If possible, combine two constant smi values using op to produce
@@ -447,7 +447,7 @@ class CodeGenerator: public AstVisitor {
void ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> constant_operand,
-SmiAnalysis* type,
+StaticType* type,
bool reversed,
OverwriteMode overwrite_mode);
@@ -459,7 +459,8 @@ class CodeGenerator: public AstVisitor {
Result* right,
OverwriteMode overwrite_mode);
-void Comparison(Condition cc,
+void Comparison(AstNode* node,
+Condition cc,
bool strict,
ControlDestination* destination);
@@ -665,7 +666,8 @@ class GenericBinaryOpStub: public CodeStub {
mode_(mode),
flags_(flags),
args_in_registers_(false),
-args_reversed_(false) {
+args_reversed_(false),
+name_(NULL) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -684,6 +686,7 @@ class GenericBinaryOpStub: public CodeStub {
bool args_in_registers_;  // Arguments passed in registers not on the stack.
bool args_reversed_;  // Left and right arguments are swapped.
bool use_sse3_;
+char* name_;
const char* GetName();
@@ -725,8 +728,8 @@ class GenericBinaryOpStub: public CodeStub {
bool ArgsInRegistersSupported() {
return ((op_ == Token::ADD) || (op_ == Token::SUB)
|| (op_ == Token::MUL) || (op_ == Token::DIV))
&& flags_ != NO_SMI_CODE_IN_STUB;
}
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
@@ -760,11 +763,11 @@ class StringAddStub: public CodeStub {
void Generate(MacroAssembler* masm);
void GenerateCopyCharacters(MacroAssembler* masm,
Register desc,
Register src,
Register count,
Register scratch,
bool ascii);
// Should the stub check whether arguments are strings?
bool string_check_;

8
deps/v8/src/ia32/disasm-ia32.cc

@@ -1049,6 +1049,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x57) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("xorpd %s,%s",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else {
UnimplementedInstruction();
}
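
The new disassembler case above handles the xorpd the codegen now emits to zero xmm0. For reference, a standalone sketch (not V8 source) of decoding the register form: 0x0F 0x57 is xorps, the 0x66 operand-size prefix turns it into xorpd, and the ModR/M byte that follows encodes the two XMM registers.

#include <cstdio>
#include <cstdint>

// Decodes the ModR/M byte after the 0x66 0x0F 0x57 opcode bytes
// (register form, mod == 11, which is what the codegen emits).
void DisassembleXorpd(const uint8_t* data) {
  uint8_t modrm = *data;
  int regop = (modrm >> 3) & 7;   // Destination XMM register.
  int rm = modrm & 7;             // Source XMM register.
  std::printf("xorpd xmm%d,xmm%d\n", regop, rm);
}
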

381
deps/v8/src/ia32/fast-codegen-ia32.cc

@@ -412,46 +412,24 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
Variable* var = decl->proxy()->var();
ASSERT(var != NULL);  // Must have been resolved.
Slot* slot = var->slot();
-ASSERT(slot != NULL);  // No global declarations here.
-// We have 3 cases for slots: LOOKUP, LOCAL, CONTEXT.
-switch (slot->type()) {
-case Slot::LOOKUP: {
-__ push(esi);
-__ push(Immediate(var->name()));
-// Declaration nodes are always introduced in one of two modes.
-ASSERT(decl->mode() == Variable::VAR || decl->mode() == Variable::CONST);
-PropertyAttributes attr =
-(decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
-__ push(Immediate(Smi::FromInt(attr)));
-// Push initial value, if any.
-// Note: For variables we must not push an initial value (such as
-// 'undefined') because we may have a (legal) redeclaration and we
-// must not destroy the current value.
-if (decl->mode() == Variable::CONST) {
-__ push(Immediate(Factory::the_hole_value()));
-} else if (decl->fun() != NULL) {
-Visit(decl->fun());
-} else {
-__ push(Immediate(Smi::FromInt(0)));  // No initial value!
-}
-__ CallRuntime(Runtime::kDeclareContextSlot, 4);
-break;
-}
-case Slot::LOCAL:
-if (decl->mode() == Variable::CONST) {
-__ mov(Operand(ebp, SlotOffset(var->slot())),
-Immediate(Factory::the_hole_value()));
-} else if (decl->fun() != NULL) {
-Visit(decl->fun());
-__ pop(Operand(ebp, SlotOffset(var->slot())));
-}
-break;
-case Slot::CONTEXT:
-// The variable in the decl always resides in the current context.
-ASSERT(function_->scope()->ContextChainLength(slot->var()->scope()) == 0);
-if (decl->mode() == Variable::CONST) {
-__ mov(eax, Immediate(Factory::the_hole_value()));
+Property* prop = var->AsProperty();
+if (slot != NULL) {
+switch (slot->type()) {
+case Slot::PARAMETER:  // Fall through.
+case Slot::LOCAL:
+if (decl->mode() == Variable::CONST) {
+__ mov(Operand(ebp, SlotOffset(var->slot())),
+Immediate(Factory::the_hole_value()));
+} else if (decl->fun() != NULL) {
+Visit(decl->fun());
+__ pop(Operand(ebp, SlotOffset(var->slot())));
+}
+break;
+case Slot::CONTEXT:
+// The variable in the decl always resides in the current context.
+ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
__ mov(ebx,
@@ -459,26 +437,70 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
__ cmp(ebx, Operand(esi));
__ Check(equal, "Unexpected declaration in current context.");
}
-__ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
-// No write barrier since the_hole_value is in old space.
-ASSERT(!Heap::InNewSpace(*Factory::the_hole_value()));
-} else if (decl->fun() != NULL) {
+if (decl->mode() == Variable::CONST) {
+__ mov(eax, Immediate(Factory::the_hole_value()));
+__ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
+// No write barrier since the hole value is in old space.
} else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(eax);
__ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
int offset = Context::SlotOffset(slot->index());
__ RecordWrite(esi, offset, eax, ecx);
}
break;
case Slot::LOOKUP: {
__ push(esi);
__ push(Immediate(var->name()));
// Declaration nodes are always introduced in one of two modes.
ASSERT(decl->mode() == Variable::VAR ||
decl->mode() == Variable::CONST);
PropertyAttributes attr =
(decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
__ push(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (decl->mode() == Variable::CONST) {
__ push(Immediate(Factory::the_hole_value()));
} else if (decl->fun() != NULL) {
Visit(decl->fun());
} else {
__ push(Immediate(Smi::FromInt(0))); // No initial value!
}
__ CallRuntime(Runtime::kDeclareContextSlot, 4);
break;
}
}
} else if (prop != NULL) {
if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
ASSERT_EQ(Expression::kValue, prop->obj()->context());
Visit(prop->obj());
ASSERT_EQ(Expression::kValue, prop->key()->context());
Visit(prop->key());
if (decl->fun() != NULL) {
ASSERT_EQ(Expression::kValue, decl->fun()->context());
Visit(decl->fun());
__ pop(eax);
-if (FLAG_debug_code) {
-// Check if we have the correct context pointer.
-__ mov(ebx,
-CodeGenerator::ContextOperand(esi, Context::FCONTEXT_INDEX));
-__ cmp(ebx, Operand(esi));
-__ Check(equal, "Unexpected declaration in current context.");
-}
-__ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
-int offset = Context::SlotOffset(slot->index());
-__ RecordWrite(esi, offset, eax, ecx);
+} else {
+__ Set(eax, Immediate(Factory::the_hole_value()));
}
-break;
-default:
-UNREACHABLE();
+Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+__ call(ic, RelocInfo::CODE_TARGET);
+// Absence of a test eax instruction following the call
+// indicates that none of the load was inlined.
+// Value in eax is ignored (declarations are statements). Receiver
+// and key on stack are discarded.
+__ add(Operand(esp), Immediate(2 * kPointerSize));
+}
}
}
@@ -493,20 +515,6 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
-Comment cmnt(masm_, "[ ReturnStatement");
-Expression* expr = stmt->expression();
-if (expr->AsLiteral() != NULL) {
-__ mov(eax, expr->AsLiteral()->handle());
-} else {
-ASSERT_EQ(Expression::kValue, expr->context());
-Visit(expr);
-__ pop(eax);
-}
-EmitReturnSequence(stmt->statement_pos());
-}
void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
@@ -527,14 +535,20 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
-Expression* rewrite = expr->var()->rewrite();
+EmitVariableLoad(expr->var(), expr->context());
+}
+void FastCodeGenerator::EmitVariableLoad(Variable* var,
+Expression::Context context) {
+Expression* rewrite = var->rewrite();
if (rewrite == NULL) {
-ASSERT(expr->var()->is_global());
+ASSERT(var->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object on the stack.
__ push(CodeGenerator::GlobalObject());
-__ mov(ecx, expr->name());
+__ mov(ecx, var->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
// By emitting a nop we make sure that we do not have a test eax
@@ -542,8 +556,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
// Remember that the assembler may choose to do peephole optimization
// (eg, push/pop elimination).
__ nop();
-DropAndMove(expr->context(), eax);
+DropAndMove(context, eax);
} else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
if (FLAG_debug_code) {
@@ -564,7 +577,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
UNREACHABLE();
}
}
-Move(expr->context(), slot, eax);
+Move(context, slot, eax);
} else {
Comment cmnt(masm_, "Variable rewritten to Property");
// A variable has been rewritten into an explicit access to
@@ -598,9 +611,8 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
// Notice: We must not have a "test eax, ..." instruction after
// the call. It is treated specially by the LoadIC code.
__ nop();
-// Drop key and object left on the stack by IC, and push the result.
-DropAndMove(expr->context(), eax, 2);
+// Drop key and object left on the stack by IC.
+DropAndMove(context, eax, 2);
}
}
@@ -634,35 +646,14 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
-Label exists;
-// Registers will be used as follows:
-// edi = JS function.
-// ebx = literals array.
-// eax = boilerplate
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-__ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
-int literal_offset =
-FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
-__ mov(eax, FieldOperand(ebx, literal_offset));
-__ cmp(eax, Factory::undefined_value());
-__ j(not_equal, &exists);
-// Create boilerplate if it does not exist.
-// Literal array (0).
-__ push(ebx);
-// Literal index (1).
+__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
-// Constant properties (2).
__ push(Immediate(expr->constant_properties()));
-__ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
-__ bind(&exists);
-// eax contains boilerplate.
-// Clone boilerplate.
-__ push(eax);
-if (expr->depth() == 1) {
-__ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+if (expr->depth() > 1) {
+__ CallRuntime(Runtime::kCreateObjectLiteral, 3);
} else {
-__ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
}
// If result_saved == true: The result is saved on top of the
@@ -758,31 +749,14 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
-Label make_clone;
-// Fetch the function's literals array.
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-__ mov(ebx, FieldOperand(ebx, JSFunction::kLiteralsOffset));
-// Check if the literal's boilerplate has been instantiated.
-int offset =
-FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
-__ mov(eax, FieldOperand(ebx, offset));
-__ cmp(eax, Factory::undefined_value());
-__ j(not_equal, &make_clone);
-// Instantiate the boilerplate.
-__ push(ebx);
+__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(expr->literals()));
-__ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
-__ bind(&make_clone);
-// Clone the boilerplate.
-__ push(eax);
if (expr->depth() > 1) {
-__ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else {
-__ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
}
bool result_saved = false;  // Is the result saved to the stack?
@@ -852,10 +826,37 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
Expression::Context context) {
Literal* key = prop->key()->AsLiteral();
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
Move(context, eax);
}
void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
Move(context, eax);
}
void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
Expression::Context context) {
GenericBinaryOpStub stub(op,
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
__ CallStub(&stub);
Move(context, eax);
}
void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
+ASSERT(var->is_global() || var->slot() != NULL);
if (var->is_global()) {
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in eax, variable name in
@@ -960,35 +961,6 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
UNREACHABLE();
break;
}
-} else {
-Property* property = var->rewrite()->AsProperty();
-ASSERT_NOT_NULL(property);
-// Load object and key onto the stack.
-Slot* object_slot = property->obj()->AsSlot();
-ASSERT_NOT_NULL(object_slot);
-Move(Expression::kValue, object_slot, eax);
-Literal* key_literal = property->key()->AsLiteral();
-ASSERT_NOT_NULL(key_literal);
-Move(Expression::kValue, key_literal);
-// Value to store was pushed before object and key on the stack.
-__ mov(eax, Operand(esp, 2 * kPointerSize));
-// Arguments to ic is value in eax, object and key on stack.
-Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-__ call(ic, RelocInfo::CODE_TARGET);
-if (expr->context() == Expression::kEffect) {
-__ add(Operand(esp), Immediate(3 * kPointerSize));
-} else if (expr->context() == Expression::kValue) {
-// Value is still on the stack in esp[2 * kPointerSize]
-__ add(Operand(esp), Immediate(2 * kPointerSize));
-} else {
-__ mov(eax, Operand(esp, 2 * kPointerSize));
-DropAndMove(expr->context(), eax, 3);
-}
}
}
@@ -1094,7 +1066,9 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
}
-void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
+void FastCodeGenerator::EmitCallWithIC(Call* expr,
+Handle<Object> name,
+RelocInfo::Mode mode) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1102,16 +1076,15 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
Visit(args->at(i));
ASSERT_EQ(Expression::kValue, args->at(i)->context());
}
-// Record source position for debugger.
+__ Set(ecx, Immediate(name));
+// Record source position of the IC call.
SetSourcePosition(expr->position());
-// Call the IC initialization code.
-Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
-NOT_IN_LOOP);
-__ call(ic, reloc_info);
+InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
+__ call(ic, mode);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-// Discard the function left on TOS.
-DropAndMove(expr->context(), eax);
+Move(expr->context(), eax);
}
@@ -1128,7 +1101,6 @@ void FastCodeGenerator::EmitCallWithStub(Call* expr) {
__ CallStub(&stub);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-// Discard the function left on TOS.
DropAndMove(expr->context(), eax);
}
@@ -1142,11 +1114,9 @@ void FastCodeGenerator::VisitCall(Call* expr) {
// Call to the identifier 'eval'.
UNREACHABLE();
} else if (var != NULL && !var->is_this() && var->is_global()) {
-// Call to a global variable.
-__ push(Immediate(var->name()));
-// Push global object as receiver for the call IC lookup.
+// Push global object as receiver for the call IC.
__ push(CodeGenerator::GlobalObject());
-EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// Call to a lookup slot.
@@ -1157,9 +1127,8 @@ void FastCodeGenerator::VisitCall(Call* expr) {
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsSymbol()) {
// Call to a named property, use call IC.
-__ push(Immediate(key->handle()));
Visit(prop->obj());
-EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
@@ -1251,7 +1220,6 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Prepare for calling JS runtime function.
-__ push(Immediate(expr->name()));
__ mov(eax, CodeGenerator::GlobalObject());
__ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
}
@@ -1264,19 +1232,18 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
if (expr->is_jsruntime()) {
-// Call the JS runtime function.
-Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
-NOT_IN_LOOP);
+// Call the JS runtime function via a call IC.
+__ Set(ecx, Immediate(expr->name()));
+InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
__ call(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-// Discard the function left on TOS.
-DropAndMove(expr->context(), eax);
} else {
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
+Move(expr->context(), eax);
}
-Move(expr->context(), eax);
}
@@ -1685,7 +1652,65 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
-#undef __
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
Move(expr->context(), eax);
}
Register FastCodeGenerator::result_register() { return eax; }
Register FastCodeGenerator::context_register() { return esi; }
void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ mov(Operand(ebp, frame_offset), value);
}
void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
__ mov(dst, CodeGenerator::ContextOperand(esi, context_index));
}
// ----------------------------------------------------------------------------
// Non-local control flow support.
void FastCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx));
__ mov(edx, Operand(esp, 0));
__ sub(Operand(edx), Immediate(masm_->CodeObject()));
ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
ASSERT_EQ(0, kSmiTag);
__ add(edx, Operand(edx)); // Convert to smi.
__ mov(Operand(esp, 0), edx);
// Store result register while executing finally block.
__ push(result_register());
}
void FastCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(edx));
// Restore result register from stack.
__ pop(result_register());
// Uncook return address.
__ mov(edx, Operand(esp, 0));
__ sar(edx, 1); // Convert smi to int.
__ add(Operand(edx), Immediate(masm_->CodeObject()));
__ mov(Operand(esp, 0), edx);
// And return.
__ ret(0);
}
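
A host-side sketch (not V8 source) of the "cooking" done by EnterFinallyBlock and ExitFinallyBlock above: the absolute return address is stored as a smi-tagged offset from the code object, so it stays valid if the GC moves the code object while control is inside the finally block.

#include <cassert>
#include <cstdint>

uint32_t Cook(uint32_t return_address, uint32_t code_object) {
  return (return_address - code_object) << 1;  // sub, then add edx, edx (smi tag)
}

uint32_t Uncook(uint32_t cooked, uint32_t code_object) {
  return (cooked >> 1) + code_object;          // sar edx, 1, then add
}

int main() {
  uint32_t code = 0x08100000, ret = code + 0x1234;
  uint32_t moved = 0x09200000;  // Code object relocated by the GC.
  assert(Uncook(Cook(ret, code), moved) == moved + 0x1234);
  return 0;
}
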
void FastCodeGenerator::ThrowException() {
__ push(result_register());
__ CallRuntime(Runtime::kThrow, 1);
}
#undef __
} }  // namespace v8::internal

165
deps/v8/src/ia32/ic-ia32.cc

@@ -48,9 +48,13 @@ namespace internal {
// must always call a backup property load that is complete.
// This function is safe to call if the receiver has fast properties,
// or if name is not a symbol, and will jump to the miss_label in that case.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
-Register r0, Register r1, Register r2,
-Register name) {
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+Label* miss_label,
+Register r0,
+Register r1,
+Register r2,
+Register name,
+DictionaryCheck check_dictionary) {
// Register use:
//
// r0 - used to hold the property dictionary.
@@ -86,11 +90,15 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
__ cmp(r0, JS_BUILTINS_OBJECT_TYPE);
__ j(equal, miss_label, not_taken);
-// Check that the properties array is a dictionary.
+// Load properties array.
__ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
-__ cmp(FieldOperand(r0, HeapObject::kMapOffset),
-Immediate(Factory::hash_table_map()));
-__ j(not_equal, miss_label);
+// Check that the properties array is a dictionary.
+if (check_dictionary == CHECK_DICTIONARY) {
+__ cmp(FieldOperand(r0, HeapObject::kMapOffset),
+Immediate(Factory::hash_table_map()));
+__ j(not_equal, miss_label);
+}
// Compute the capacity mask.
const int kCapacityOffset =
@@ -223,7 +231,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- esp[4] : name
// -- esp[8] : receiver
// -----------------------------------
-Label slow, check_string, index_int, index_string, check_pixel_array;
+Label slow, check_string, index_int, index_string;
+Label check_pixel_array, probe_dictionary;
// Load name and receiver.
__ mov(eax, Operand(esp, kPointerSize));
@@ -302,17 +311,72 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ test(ebx, Immediate(String::kIsArrayIndexMask));
__ j(not_zero, &index_string, not_taken);
-// If the string is a symbol, do a quick inline probe of the receiver's
-// dictionary, if it exists.
+// Is the string a symbol?
__ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
__ test(ebx, Immediate(kIsSymbolMask));
__ j(zero, &slow, not_taken);
-// Probe the dictionary leaving result in ecx.
-GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax);
+// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in ecx.
__ mov(ebx, FieldOperand(ecx, JSObject::kPropertiesOffset));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::hash_table_map()));
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
__ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
__ mov(edx, ebx);
__ shr(edx, KeyedLookupCache::kMapHashShift);
__ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
__ shr(eax, String::kHashShift);
__ xor_(edx, Operand(eax));
__ and_(edx, KeyedLookupCache::kCapacityMask);
// Load the key (consisting of map and symbol) from the cache and
// check for match.
ExternalReference cache_keys
= ExternalReference::keyed_lookup_cache_keys();
__ mov(edi, edx);
__ shl(edi, kPointerSizeLog2 + 1);
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
__ add(Operand(edi), Immediate(kPointerSize));
__ mov(edi, Operand::StaticArray(edi, times_1, cache_keys));
__ cmp(edi, Operand(esp, kPointerSize));
__ j(not_equal, &slow);
// Get field offset and check that it is an in-object property.
ExternalReference cache_field_offsets
= ExternalReference::keyed_lookup_cache_field_offsets();
__ mov(eax,
Operand::StaticArray(edx, times_pointer_size, cache_field_offsets));
__ movzx_b(edx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
__ cmp(eax, Operand(edx));
__ j(above_equal, &slow);
// Load in-object property.
__ sub(eax, Operand(edx));
__ movzx_b(edx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ add(eax, Operand(edx));
__ mov(eax, FieldOperand(ecx, eax, times_pointer_size, 0));
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
GenerateDictionaryLoad(masm,
&slow,
ebx,
ecx,
edx,
eax,
DICTIONARY_CHECK_DONE);
GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
__ mov(eax, Operand(ecx));
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
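
A sketch (assumed constants, not V8 source) of the hash computed inline above for the keyed lookup cache: bits of the receiver's map pointer are mixed with the symbol's hash field and masked to the cache capacity, giving the index probed in cache_keys and cache_field_offsets.

#include <cstdint>

const int kMapHashShift = 2;       // Assumed KeyedLookupCache::kMapHashShift.
const int kStringHashShift = 2;    // Assumed String::kHashShift.
const int kCapacityMask = 64 - 1;  // Assumed power-of-two cache capacity.

int KeyedLookupCacheIndex(uint32_t map_word, uint32_t hash_field) {
  return static_cast<int>(((map_word >> kMapHashShift) ^
                           (hash_field >> kStringHashShift)) & kCapacityMask);
}
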
// If the hash field contains an array index pick it out. The assert checks
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it does not
@@ -824,13 +888,16 @@ Object* CallIC_Miss(Arguments args);
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
+// -- ecx : name
+// -- esp[0] : return address
+// -- esp[(argc - n) * 4] : arg[n] (zero-based)
+// -- ...
+// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-// Get the name of the function from the stack; 2 ~ return address, receiver
-__ mov(ecx, Operand(esp, (argc + 2) * kPointerSize));
// Probe the stub cache.
Code::Flags flags =
@@ -876,7 +943,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
-Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+GenerateMiss(masm, argc);
}
@@ -884,27 +951,34 @@ static void GenerateNormalHelper(MacroAssembler* masm,
int argc,
bool is_global_object,
Label* miss) {
-// Search dictionary - put result in register edx.
-GenerateDictionaryLoad(masm, miss, eax, edx, ebx, ecx);
+// ----------- S t a t e -------------
+// -- ecx : name
+// -- edx : receiver
+// -- esp[0] : return address
+// -- esp[(argc - n) * 4] : arg[n] (zero-based)
+// -- ...
+// -- esp[(argc + 1) * 4] : receiver
+// -----------------------------------
-// Move the result to register edi and check that it isn't a smi.
-__ mov(edi, Operand(edx));
-__ test(edx, Immediate(kSmiTagMask));
+// Search dictionary - put result in register edi.
+__ mov(edi, edx);
+GenerateDictionaryLoad(masm, miss, eax, edi, ebx, ecx, CHECK_DICTIONARY);
+// Check that the result is not a smi.
+__ test(edi, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
-// Check that the value is a JavaScript function.
-__ CmpObjectType(edx, JS_FUNCTION_TYPE, edx);
+// Check that the value is a JavaScript function, fetching its map into eax.
+__ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
__ j(not_equal, miss, not_taken);
-// Check that the function has been loaded.
-__ mov(edx, FieldOperand(edi, JSFunction::kMapOffset));
-__ mov(edx, FieldOperand(edx, Map::kBitField2Offset));
-__ test(edx, Immediate(1 << Map::kNeedsLoading));
+// Check that the function has been loaded. eax holds function's map.
+__ mov(eax, FieldOperand(eax, Map::kBitField2Offset));
+__ test(eax, Immediate(1 << Map::kNeedsLoading));
__ j(not_zero, miss, not_taken);
-// Patch the receiver with the global proxy if necessary.
+// Patch the receiver on stack with the global proxy if necessary.
if (is_global_object) {
+__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
__ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
__ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
}
@ -917,14 +991,17 @@ static void GenerateNormalHelper(MacroAssembler* masm,
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) { void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// ----------------------------------- // -----------------------------------
Label miss, global_object, non_global_object; Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack; 1 ~ return address. // Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Get the name of the function from the stack; 2 ~ return address, receiver.
__ mov(ecx, Operand(esp, (argc + 2) * kPointerSize));
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask)); __ test(edx, Immediate(kSmiTagMask));
@ -973,33 +1050,33 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime. // Cache miss: Jump to runtime.
__ bind(&miss); __ bind(&miss);
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss))); GenerateMiss(masm, argc);
} }
void CallIC::Generate(MacroAssembler* masm, void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
int argc,
const ExternalReference& f) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// ----------------------------------- // -----------------------------------
// Get the receiver of the function from the stack; 1 ~ return address. // Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Get the name of the function to call from the stack.
// 2 ~ receiver, return address.
__ mov(ebx, Operand(esp, (argc + 2) * kPointerSize));
// Enter an internal frame. // Enter an internal frame.
__ EnterInternalFrame(); __ EnterInternalFrame();
// Push the receiver and the name of the function. // Push the receiver and the name of the function.
__ push(edx); __ push(edx);
__ push(ebx); __ push(ecx);
// Call the entry. // Call the entry.
CEntryStub stub(1); CEntryStub stub(1);
__ mov(eax, Immediate(2)); __ mov(eax, Immediate(2));
__ mov(ebx, Immediate(f)); __ mov(ebx, Immediate(ExternalReference(IC_Utility(kCallIC_Miss))));
__ CallStub(&stub); __ CallStub(&stub);
// Move result to edi and exit the internal frame. // Move result to edi and exit the internal frame.
@ -1011,11 +1088,11 @@ void CallIC::Generate(MacroAssembler* masm,
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
__ test(edx, Immediate(kSmiTagMask)); __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &invoke, not_taken); __ j(zero, &invoke, not_taken);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(ecx, JS_GLOBAL_OBJECT_TYPE); __ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
__ j(equal, &global); __ j(equal, &global);
__ cmp(ecx, JS_BUILTINS_OBJECT_TYPE); __ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
__ j(not_equal, &invoke); __ j(not_equal, &invoke);
// Patch the receiver on the stack. // Patch the receiver on the stack.
@ -1088,7 +1165,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Search the dictionary placing the result in eax. // Search the dictionary placing the result in eax.
__ bind(&probe); __ bind(&probe);
GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx); GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx, CHECK_DICTIONARY);
GenerateCheckNonObjectOrLoaded(masm, &miss, eax, edx); GenerateCheckNonObjectOrLoaded(masm, &miss, eax, edx);
__ ret(0); __ ret(0);

48
deps/v8/src/ia32/macro-assembler-ia32.cc

@@ -504,6 +504,13 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
 }
+void MacroAssembler::PopTryHandler() {
+  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+  pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+  add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+}
 Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
                                    JSObject* holder, Register holder_reg,
                                    Register scratch,
@@ -834,10 +841,9 @@ void MacroAssembler::AllocateTwoByteString(Register result,
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
   ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  mov(scratch1, length);
   ASSERT(kShortSize == 2);
-  shl(scratch1, 1);
-  add(Operand(scratch1), Immediate(kObjectAlignmentMask));
+  // scratch1 = length * 2 + kObjectAlignmentMask.
+  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
   and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
   // Allocate two byte string in new space.
@@ -1016,17 +1022,37 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
 void MacroAssembler::CallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
   call(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
+Object* MacroAssembler::TryCallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  Object* result = stub->TryGetCode();
+  if (!result->IsFailure()) {
+    call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
+  }
+  return result;
+}
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
   jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
+Object* MacroAssembler::TryTailCallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  Object* result = stub->TryGetCode();
+  if (!result->IsFailure()) {
+    jmp(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
+  }
+  return result;
+}
 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
@@ -1331,6 +1357,18 @@ void MacroAssembler::Ret() {
 }
+void MacroAssembler::Drop(int stack_elements) {
+  if (stack_elements > 0) {
+    add(Operand(esp), Immediate(stack_elements * kPointerSize));
+  }
+}
+void MacroAssembler::Move(Register dst, Handle<Object> value) {
+  mov(dst, value);
+}
 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
   if (FLAG_native_code_counters && counter->Enabled()) {
     mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
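Aside: the new TryCallStub/TryTailCallStub exist so code paths that must not trigger a garbage collection can surface an allocation failure to their caller instead of collecting. A minimal standalone sketch of that "return the failure rather than collect" shape, with toy types (not the V8 API):

#include <cstdio>

// Toy model: an operation that cannot run a GC reports allocation failure
// to its caller, which is then free to collect and retry.
struct Result {
  bool is_failure;
  int value;  // meaningful only when !is_failure
};

static bool heap_has_room = false;  // pretend the heap is full at first

Result TryGetCode() {
  // Stands in for stub->TryGetCode(): allocate without triggering a GC.
  if (!heap_has_room) return Result{true, 0};
  return Result{false, 42};
}

Result TryCallStub() {
  Result code = TryGetCode();
  if (code.is_failure) return code;   // propagate; do not collect here
  return Result{false, code.value};   // "call" the generated code
}

int main() {
  Result r = TryCallStub();
  if (r.is_failure) {
    std::puts("allocation failed: run a GC, then retry");
    heap_has_room = true;  // model the collection freeing space
    r = TryCallStub();
  }
  std::printf("stub returned %d\n", r.value);
}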

22
deps/v8/src/ia32/macro-assembler-ia32.h

@@ -149,6 +149,8 @@ class MacroAssembler: public Assembler {
   // address must be pushed before calling this helper.
   void PushTryHandler(CodeLocation try_location, HandlerType type);
+  // Unlink the stack handler on top of the stack from the try handler chain.
+  void PopTryHandler();
   // ---------------------------------------------------------------------------
   // Inline caching support
@@ -285,12 +287,22 @@ class MacroAssembler: public Assembler {
   // ---------------------------------------------------------------------------
   // Runtime calls
-  // Call a code stub.
+  // Call a code stub. Generate the code if necessary.
   void CallStub(CodeStub* stub);
-  // Tail call a code stub (jump).
+  // Call a code stub and return the code object called. Try to generate
+  // the code if necessary. Do not perform a GC but instead return a retry
+  // after GC failure.
+  Object* TryCallStub(CodeStub* stub);
+  // Tail call a code stub (jump). Generate the code if necessary.
   void TailCallStub(CodeStub* stub);
+  // Tail call a code stub (jump) and return the code object called. Try to
+  // generate the code if necessary. Do not perform a GC but instead return
+  // a retry after GC failure.
+  Object* TryTailCallStub(CodeStub* stub);
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
@@ -323,6 +335,12 @@ class MacroAssembler: public Assembler {
   void Ret();
+  void Drop(int element_count);
+  void Call(Label* target) { call(target); }
+  void Move(Register target, Handle<Object> value);
   struct Unresolved {
     int pc;
     uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.

143
deps/v8/src/ia32/stub-cache-ia32.cc

@@ -152,11 +152,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
 }
-template <typename Pushable>
 static void PushInterceptorArguments(MacroAssembler* masm,
                                      Register receiver,
                                      Register holder,
-                                     Pushable name,
+                                     Register name,
                                      JSObject* holder_obj) {
   __ push(receiver);
   __ push(holder);
@@ -285,11 +284,10 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
 }
-template <class Pushable>
 static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
                                                    Register receiver,
                                                    Register holder,
-                                                   Pushable name,
+                                                   Register name,
                                                    JSObject* holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
@@ -495,8 +493,8 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
 class CallInterceptorCompiler BASE_EMBEDDED {
  public:
-  explicit CallInterceptorCompiler(const ParameterCount& arguments)
-      : arguments_(arguments), argc_(arguments.immediate()) {}
+  CallInterceptorCompiler(const ParameterCount& arguments, Register name)
+      : arguments_(arguments), argc_(arguments.immediate()), name_(name) {}
   void CompileCacheable(MacroAssembler* masm,
                         StubCompiler* stub_compiler,
@@ -527,17 +525,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
     }
     __ EnterInternalFrame();
-    __ push(holder);  // save the holder
+    __ push(holder);  // Save the holder.
+    __ push(name_);  // Save the name.
-    CompileCallLoadPropertyWithInterceptor(
-        masm,
-        receiver,
-        holder,
-        // Under EnterInternalFrame this refers to name.
-        Operand(ebp, (argc_ + 3) * kPointerSize),
-        holder_obj);
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
-    __ pop(receiver);  // restore holder
+    __ pop(name_);  // Restore the name.
+    __ pop(receiver);  // Restore the holder.
     __ LeaveInternalFrame();
     __ cmp(eax, Factory::no_interceptor_result_sentinel());
@@ -577,11 +575,13 @@ class CallInterceptorCompiler BASE_EMBEDDED {
                    JSObject* holder_obj,
                    Label* miss_label) {
     __ EnterInternalFrame();
+    // Save the name_ register across the call.
+    __ push(name_);
     PushInterceptorArguments(masm,
                              receiver,
                              holder,
-                             Operand(ebp, (argc_ + 3) * kPointerSize),
+                             name_,
                              holder_obj);
     ExternalReference ref = ExternalReference(
@@ -592,12 +592,15 @@ class CallInterceptorCompiler BASE_EMBEDDED {
     CEntryStub stub(1);
     __ CallStub(&stub);
+    // Restore the name_ register.
+    __ pop(name_);
     __ LeaveInternalFrame();
   }
  private:
   const ParameterCount& arguments_;
   int argc_;
+  Register name_;
 };
@@ -754,7 +757,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
 }
-void StubCompiler::GenerateLoadCallback(JSObject* object,
+bool StubCompiler::GenerateLoadCallback(JSObject* object,
                                         JSObject* holder,
                                         Register receiver,
                                         Register name_reg,
@@ -762,7 +765,8 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
                                         Register scratch2,
                                         AccessorInfo* callback,
                                         String* name,
-                                        Label* miss) {
+                                        Label* miss,
+                                        Failure** failure) {
   // Check that the receiver isn't a smi.
   __ test(receiver, Immediate(kSmiTagMask));
   __ j(zero, miss, not_taken);
@@ -798,7 +802,14 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
   Address getter_address = v8::ToCData<Address>(callback->getter());
   ApiFunction fun(getter_address);
   ApiGetterEntryStub stub(callback_handle, &fun);
-  __ CallStub(&stub);
+  // Calling the stub may try to allocate (if the code is not already
+  // generated). Do not allow the call to perform a garbage
+  // collection but instead return the allocation failure object.
+  Object* result = masm()->TryCallStub(&stub);
+  if (result->IsFailure()) {
+    *failure = Failure::cast(result);
+    return false;
+  }
   // We need to avoid using eax since that now holds the result.
   Register tmp = other.is(eax) ? reg : other;
@@ -806,6 +817,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
   __ LeaveInternalFrame();
   __ ret(0);
+  return true;
 }
@@ -885,6 +897,11 @@ Object* CallStubCompiler::CompileCallField(Object* object,
                                            int index,
                                            String* name) {
   // ----------- S t a t e -------------
+  //  -- ecx                 : name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
   Label miss;
@@ -899,7 +916,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
   // Do the right check and compute the holder register.
   Register reg =
       CheckPrototypes(JSObject::cast(object), edx, holder,
-                      ebx, ecx, name, &miss);
+                      ebx, eax, name, &miss);
   GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
@@ -935,6 +952,11 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               String* name,
                                               CheckType check) {
   // ----------- S t a t e -------------
+  //  -- ecx                 : name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
   Label miss;
@@ -956,7 +978,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
     case RECEIVER_MAP_CHECK:
       // Check that the maps haven't changed.
       CheckPrototypes(JSObject::cast(object), edx, holder,
-                      ebx, ecx, name, &miss);
+                      ebx, eax, name, &miss);
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -968,15 +990,15 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
     case STRING_CHECK:
       // Check that the object is a two-byte string or a symbol.
-      __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-      __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-      __ cmp(ecx, FIRST_NONSTRING_TYPE);
+      __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
+      __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+      __ cmp(eax, FIRST_NONSTRING_TYPE);
       __ j(above_equal, &miss, not_taken);
       // Check that the maps starting from the prototype haven't changed.
       GenerateLoadGlobalFunctionPrototype(masm(),
                                           Context::STRING_FUNCTION_INDEX,
-                                          ecx);
-      CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
+                                          eax);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                       ebx, edx, name, &miss);
       break;
@@ -985,14 +1007,14 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
       // Check that the object is a smi or a heap number.
       __ test(edx, Immediate(kSmiTagMask));
       __ j(zero, &fast, taken);
-      __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
+      __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
       __ j(not_equal, &miss, not_taken);
       __ bind(&fast);
       // Check that the maps starting from the prototype haven't changed.
       GenerateLoadGlobalFunctionPrototype(masm(),
                                           Context::NUMBER_FUNCTION_INDEX,
-                                          ecx);
-      CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
+                                          eax);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                       ebx, edx, name, &miss);
       break;
     }
@@ -1008,15 +1030,15 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
       // Check that the maps starting from the prototype haven't changed.
       GenerateLoadGlobalFunctionPrototype(masm(),
                                           Context::BOOLEAN_FUNCTION_INDEX,
-                                          ecx);
-      CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
+                                          eax);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                       ebx, edx, name, &miss);
       break;
     }
     case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
       CheckPrototypes(JSObject::cast(object), edx, holder,
-                      ebx, ecx, name, &miss);
+                      ebx, eax, name, &miss);
       // Make sure object->HasFastElements().
       // Get the elements array of the object.
       __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
@@ -1059,6 +1081,11 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
                                                  JSObject* holder,
                                                  String* name) {
   // ----------- S t a t e -------------
+  //  -- ecx                 : name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
   Label miss;
@@ -1071,7 +1098,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
   // Get the receiver from the stack.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-  CallInterceptorCompiler compiler(arguments());
+  CallInterceptorCompiler compiler(arguments(), ecx);
   CompileLoadInterceptor(&compiler,
                          this,
                          masm(),
@@ -1081,7 +1108,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
                          &lookup,
                          edx,
                          ebx,
-                         ecx,
+                         edi,
                          &miss);
   // Restore receiver.
@@ -1120,6 +1147,11 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
                                             JSFunction* function,
                                             String* name) {
   // ----------- S t a t e -------------
+  //  -- ecx                 : name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
   Label miss;
@@ -1138,15 +1170,32 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
   }
   // Check that the maps haven't changed.
-  CheckPrototypes(object, edx, holder, ebx, ecx, name, &miss);
+  CheckPrototypes(object, edx, holder, ebx, eax, name, &miss);
   // Get the value from the cell.
   __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
   __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
   // Check that the cell contains the same function.
-  __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
-  __ j(not_equal, &miss, not_taken);
+  if (Heap::InNewSpace(function)) {
+    // We can't embed a pointer to a function in new space so we have
+    // to verify that the shared function info is unchanged. This has
+    // the nice side effect that multiple closures based on the same
+    // function can all use this call IC. Before we load through the
+    // function, we have to verify that it still is a function.
+    __ test(edi, Immediate(kSmiTagMask));
+    __ j(zero, &miss, not_taken);
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
+    __ j(not_equal, &miss, not_taken);
+    // Check the shared function info. Make sure it hasn't changed.
+    __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
+           Immediate(Handle<SharedFunctionInfo>(function->shared())));
+    __ j(not_equal, &miss, not_taken);
+  } else {
+    __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
+    __ j(not_equal, &miss, not_taken);
+  }
   // Patch the receiver on the stack with the global proxy.
   if (object->IsGlobalObject()) {
@@ -1420,10 +1469,10 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
 }
-Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+Object* LoadStubCompiler::CompileLoadCallback(String* name,
+                                              JSObject* object,
                                               JSObject* holder,
-                                              AccessorInfo* callback,
-                                              String* name) {
+                                              AccessorInfo* callback) {
   // ----------- S t a t e -------------
   //  -- ecx    : name
   //  -- esp[0] : return address
@@ -1432,8 +1481,11 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
   Label miss;
   __ mov(eax, Operand(esp, kPointerSize));
-  GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
-                       callback, name, &miss);
+  Failure* failure = Failure::InternalError();
+  bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
+                                      callback, name, &miss, &failure);
+  if (!success) return failure;
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1597,8 +1649,11 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
-  GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
-                       callback, name, &miss);
+  Failure* failure = Failure::InternalError();
+  bool success = GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
+                                      callback, name, &miss, &failure);
+  if (!success) return failure;
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_callback, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
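Aside: the new-space branch in CompileCallGlobal keys the check on the function's SharedFunctionInfo because a pointer to a new-space object cannot be embedded in generated code, and, as the comment notes, it also lets every closure over the same function share one call IC. A standalone model of that sharing, with toy types (not V8 internals):

#include <cstdio>

// Toy model: a call-site cache that matches on the shared function info
// rather than on closure identity, so distinct closures of one function
// all hit the same cache entry.
struct SharedInfo { const char* name; };
struct Closure { const SharedInfo* shared; };

struct CallSiteCache {
  const SharedInfo* expected = nullptr;
  bool Hit(const Closure& f) {
    if (expected == nullptr) {  // first call: miss, remember the shared info
      expected = f.shared;
      return false;
    }
    return f.shared == expected;  // any closure of the same function hits
  }
};

int main() {
  SharedInfo add = {"add"};
  Closure c1 = {&add}, c2 = {&add};  // two closures, one shared info
  CallSiteCache ic;
  std::printf("first call hit: %d\n", ic.Hit(c1));      // 0 (miss, caches)
  std::printf("second closure hit: %d\n", ic.Hit(c2));  // 1 (shares the IC)
}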

11
deps/v8/src/ia32/virtual-frame-ia32.cc

@@ -925,14 +925,17 @@ Result VirtualFrame::CallKeyedStoreIC() {
 Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
                                 int arg_count,
                                 int loop_nesting) {
-  // Arguments, receiver, and function name are on top of the frame.
-  // The IC expects them on the stack. It does not drop the function
-  // name slot (but it does drop the rest).
+  // Function name, arguments, and receiver are on top of the frame.
+  // The IC expects the name in ecx and the rest on the stack and
+  // drops them all.
   InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = cgen()->ComputeCallInitialize(arg_count, in_loop);
   // Spill args, receiver, and function. The call will drop args and
   // receiver.
-  PrepareForCall(arg_count + 2, arg_count + 1);
+  Result name = Pop();
+  PrepareForCall(arg_count + 1, arg_count + 1);  // Arguments + receiver.
+  name.ToRegister(ecx);
+  name.Unuse();
   return RawCallCodeObject(ic, mode);
 }

6
deps/v8/src/ia32/virtual-frame-ia32.h

@@ -341,9 +341,9 @@ class VirtualFrame: public ZoneObject {
   // of the frame. Key and receiver are not dropped.
   Result CallKeyedStoreIC();
-  // Call call IC. Arguments, reciever, and function name are found
-  // on top of the frame. Function name slot is not dropped. The
-  // argument count does not include the receiver.
+  // Call call IC. Function name, arguments, and receiver are found on top
+  // of the frame and dropped by the call. The argument count does not
+  // include the receiver.
   Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
   // Allocate and call JS function as constructor. Arguments,

22
deps/v8/src/ic.cc

@@ -409,7 +409,7 @@ Object* CallIC::LoadFunction(State state,
   if (!lookup.IsValid()) {
     // If the object does not have the requested property, check which
     // exception we need to throw.
-    if (is_contextual()) {
+    if (IsContextual(object)) {
       return ReferenceError("not_defined", name);
     }
     return TypeError("undefined_method", object, name);
@@ -428,7 +428,7 @@ Object* CallIC::LoadFunction(State state,
     // If the object does not have the requested property, check which
     // exception we need to throw.
     if (attr == ABSENT) {
-      if (is_contextual()) {
+      if (IsContextual(object)) {
        return ReferenceError("not_defined", name);
      }
      return TypeError("undefined_method", object, name);
@@ -628,7 +628,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
   // If lookup is invalid, check if we need to throw an exception.
   if (!lookup.IsValid()) {
-    if (FLAG_strict || is_contextual()) {
+    if (FLAG_strict || IsContextual(object)) {
       return ReferenceError("not_defined", name);
     }
     LOG(SuspectReadEvent(*name, *object));
@@ -671,7 +671,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
   if (result->IsFailure()) return result;
   // If the property is not present, check if we need to throw an
   // exception.
-  if (attr == ABSENT && is_contextual()) {
+  if (attr == ABSENT && IsContextual(object)) {
     return ReferenceError("not_defined", name);
   }
   return result;
@@ -843,7 +843,7 @@ Object* KeyedLoadIC::Load(State state,
     // If lookup is invalid, check if we need to throw an exception.
     if (!lookup.IsValid()) {
-      if (FLAG_strict || is_contextual()) {
+      if (FLAG_strict || IsContextual(object)) {
         return ReferenceError("not_defined", name);
       }
     }
@@ -859,7 +859,7 @@ Object* KeyedLoadIC::Load(State state,
     if (result->IsFailure()) return result;
     // If the property is not present, check if we need to throw an
     // exception.
-    if (attr == ABSENT && is_contextual()) {
+    if (attr == ABSENT && IsContextual(object)) {
       return ReferenceError("not_defined", name);
     }
     return result;
@@ -1292,16 +1292,6 @@ Object* CallIC_Miss(Arguments args) {
 }
-void CallIC::GenerateInitialize(MacroAssembler* masm, int argc) {
-  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
-}
-
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
-}
 // Used from ic_<arch>.cc.
 Object* LoadIC_Miss(Arguments args) {
   NoHandleAllocation na;

24
deps/v8/src/ic.h

@@ -33,6 +33,11 @@
 namespace v8 {
 namespace internal {
+// Flag indicating whether an IC stub needs to check that a backing
+// store is in dictionary case.
+enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
 // IC_UTIL_LIST defines all utility functions called from generated
 // inline caching code. The argument for the macro, ICU, is the function name.
 #define IC_UTIL_LIST(ICU) \
@@ -99,7 +104,16 @@ class IC {
   // Returns if this IC is for contextual (no explicit receiver)
   // access to properties.
-  bool is_contextual() {
+  bool IsContextual(Handle<Object> receiver) {
+    if (receiver->IsGlobalObject()) {
+      return SlowIsContextual();
+    } else {
+      ASSERT(!SlowIsContextual());
+      return false;
+    }
+  }
+  bool SlowIsContextual() {
     return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
   }
@@ -175,16 +189,14 @@ class CallIC: public IC {
   // Code generator routines.
-  static void GenerateInitialize(MacroAssembler* masm, int argc);
+  static void GenerateInitialize(MacroAssembler* masm, int argc) {
+    GenerateMiss(masm, argc);
+  }
   static void GenerateMiss(MacroAssembler* masm, int argc);
   static void GenerateMegamorphic(MacroAssembler* masm, int argc);
   static void GenerateNormal(MacroAssembler* masm, int argc);
  private:
-  static void Generate(MacroAssembler* masm,
-                       int argc,
-                       const ExternalReference& f);
   // Update the inline cache and the global stub cache based on the
   // lookup result.
   void UpdateCaches(LookupResult* lookup,
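Aside: the new IsContextual is a fast-path guard. Only global receivers can possibly be contextual, so the common case skips the relocation-info inspection that SlowIsContextual performs, and the ASSERT keeps the shortcut honest in debug builds. The same shape in a standalone sketch, with a hypothetical stand-in for the slow predicate (not the V8 API):

#include <cassert>
#include <cstdio>

// Toy model of the fast-path-plus-assert pattern: a cheap property of the
// input rules out the expensive check in the common case, and an assert
// verifies (in debug builds only) that the shortcut never changes the answer.
static int slow_checks = 0;

bool SlowIsContextual() {  // stands in for decoding relocation info
  ++slow_checks;
  return false;  // the value is irrelevant to the pattern being shown
}

bool IsContextual(bool receiver_is_global) {
  if (receiver_is_global) return SlowIsContextual();
  assert(!SlowIsContextual());  // debug-only cross-check, as in the V8 code
  return false;
}

int main() {
  IsContextual(false);  // fast path: no slow check in release builds
  IsContextual(true);   // global receiver: pays for the slow check
  std::printf("slow checks: %d\n", slow_checks);
}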

5
deps/v8/src/macro-assembler.h

@@ -77,8 +77,13 @@ enum AllocationFlags {
 #elif V8_TARGET_ARCH_ARM
 #include "arm/constants-arm.h"
 #include "assembler.h"
+#ifdef V8_ARM_VARIANT_THUMB
+#include "arm/assembler-thumb2.h"
+#include "arm/assembler-thumb2-inl.h"
+#else
 #include "arm/assembler-arm.h"
 #include "arm/assembler-arm-inl.h"
+#endif
 #include "code.h"  // must be after assembler_*.h
 #include "arm/macro-assembler-arm.h"
 #else

74
deps/v8/src/mark-compact.cc

@@ -155,6 +155,8 @@ void MarkCompactCollector::Finish() {
   // objects (empty string, illegal builtin).
   StubCache::Clear();
+  ExternalStringTable::CleanUp();
   // If we've just compacted old space there's no reason to check the
   // fragmentation limit. Just return.
   if (HasCompacted()) return;
@@ -369,41 +371,18 @@ class RootMarkingVisitor : public ObjectVisitor {
 class SymbolTableCleaner : public ObjectVisitor {
  public:
   SymbolTableCleaner() : pointers_removed_(0) { }
-  void VisitPointers(Object** start, Object** end) {
+  virtual void VisitPointers(Object** start, Object** end) {
     // Visit all HeapObject pointers in [start, end).
     for (Object** p = start; p < end; p++) {
       if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
         // Check if the symbol being pruned is an external symbol. We need to
         // delete the associated external data as this symbol is going away.
-        // Since the object is not marked we can access its map word safely
-        // without having to worry about marking bits in the object header.
-        Map* map = HeapObject::cast(*p)->map();
         // Since no objects have yet been moved we can safely access the map of
         // the object.
-        uint32_t type = map->instance_type();
-        bool is_external = (type & kStringRepresentationMask) ==
-                           kExternalStringTag;
-        if (is_external) {
-          bool is_two_byte = (type & kStringEncodingMask) == kTwoByteStringTag;
-          byte* resource_addr = reinterpret_cast<byte*>(*p) +
-                                ExternalString::kResourceOffset -
-                                kHeapObjectTag;
-          if (is_two_byte) {
-            v8::String::ExternalStringResource** resource =
-                reinterpret_cast<v8::String::ExternalStringResource**>
-                (resource_addr);
-            delete *resource;
-            // Clear the resource pointer in the symbol.
-            *resource = NULL;
-          } else {
-            v8::String::ExternalAsciiStringResource** resource =
-                reinterpret_cast<v8::String::ExternalAsciiStringResource**>
-                (resource_addr);
-            delete *resource;
-            // Clear the resource pointer in the symbol.
-            *resource = NULL;
-          }
-        }
+        if ((*p)->IsExternalString()) {
+          Heap::FinalizeExternalString(String::cast(*p));
+        }
         // Set the entry to null_value (as deleted).
         *p = Heap::raw_unchecked_null_value();
@@ -546,34 +525,7 @@ bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
 }
-class SymbolMarkingVisitor : public ObjectVisitor {
- public:
-  void VisitPointers(Object** start, Object** end) {
-    MarkingVisitor marker;
-    for (Object** p = start; p < end; p++) {
-      if (!(*p)->IsHeapObject()) continue;
-      HeapObject* object = HeapObject::cast(*p);
-      // If the object is marked, we have marked or are in the process
-      // of marking subparts.
-      if (object->IsMarked()) continue;
-      // The object is unmarked, we do not need to unmark to use its
-      // map.
-      Map* map = object->map();
-      object->IterateBody(map->instance_type(),
-                          object->SizeFromMap(map),
-                          &marker);
-    }
-  }
-};
 void MarkCompactCollector::MarkSymbolTable() {
-  // Objects reachable from symbols are marked as live so as to ensure
-  // that if the symbol itself remains alive after GC for any reason,
-  // and if it is a cons string backed by an external string (even indirectly),
-  // then the external string does not receive a weak reference callback.
   SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
   // Mark the symbol table itself.
   SetMark(symbol_table);
@@ -581,11 +533,6 @@ void MarkCompactCollector::MarkSymbolTable() {
   MarkingVisitor marker;
   symbol_table->IteratePrefix(&marker);
   ProcessMarkingStack(&marker);
-  // Mark subparts of the symbols but not the symbols themselves
-  // (unless reachable from another symbol).
-  SymbolMarkingVisitor symbol_marker;
-  symbol_table->IterateElements(&symbol_marker);
-  ProcessMarkingStack(&marker);
 }
@@ -774,6 +721,8 @@ void MarkCompactCollector::MarkLiveObjects() {
   SymbolTableCleaner v;
   symbol_table->IterateElements(&v);
   symbol_table->ElementsRemoved(v.PointersRemoved());
+  ExternalStringTable::Iterate(&v);
+  ExternalStringTable::CleanUp();
   // Remove object groups after marking phase.
   GlobalHandles::RemoveObjectGroups();
@@ -887,11 +836,8 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
 // space are encoded in their map pointer word (along with an encoding of
 // their map pointers).
 //
-//  31             21 20              10 9                0
-//  +-----------------+------------------+-----------------+
-//  |forwarding offset|page offset of map|page index of map|
-//  +-----------------+------------------+-----------------+
-//   11 bits           11 bits            10 bits
+// The excact encoding is described in the comments for class MapWord in
+// objects.h.
 //
 // An address range [start, end) can have both live and non-live objects.
 // Maximal non-live regions are marked so they can be skipped on subsequent
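Aside: the hand-rolled pointer arithmetic deleted above (computing resource_addr and deleting through a reinterpret_cast) is replaced by a single Heap::FinalizeExternalString call plus an ExternalStringTable swept during marking. A standalone sketch of that table-plus-finalizer shape, with toy types (not V8 internals):

#include <cstdio>
#include <vector>

// Toy model of the ExternalStringTable change: strings owning an external
// resource are registered once, and one cleanup pass finalizes the dead ones
// instead of open-coding the resource layout at every call site.
struct Resource {
  ~Resource() { std::puts("resource freed"); }
};

struct ExternalString {
  Resource* resource;
  bool marked;  // set by the marking phase when the string is live
};

void FinalizeExternalString(ExternalString* s) {
  delete s->resource;     // release the embedder's data
  s->resource = nullptr;  // clear the pointer, as the V8 code does
}

void CleanUpTable(std::vector<ExternalString*>* table) {
  std::vector<ExternalString*> live;
  for (ExternalString* s : *table) {
    if (s->marked) live.push_back(s);  // survivors stay registered
    else FinalizeExternalString(s);    // dead: free the resource
  }
  table->swap(live);
}

int main() {
  ExternalString a{new Resource, true}, b{new Resource, false};
  std::vector<ExternalString*> table = {&a, &b};
  CleanUpTable(&table);  // frees only b's resource
  std::printf("live external strings: %zu\n", table.size());
  delete a.resource;     // tidy up the survivor in this demo
}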
35
deps/v8/src/math.js

@@ -29,7 +29,6 @@
 // Keep reference to original values of some global properties. This
 // has the added benefit that the code in this file is isolated from
 // changes to these properties.
-const $Infinity = global.Infinity;
 const $floor = MathFloor;
 const $random = MathRandom;
 const $abs = MathAbs;
@@ -118,26 +117,40 @@ function MathLog(x) {
 // ECMA 262 - 15.8.2.11
 function MathMax(arg1, arg2) {  // length == 2
-  var r = -$Infinity;
   var length = %_ArgumentsLength();
-  for (var i = 0; i < length; i++) {
-    var n = ToNumber(%_Arguments(i));
+  if (length == 0) {
+    return -1/0;  // Compiler constant-folds this to -Infinity.
+  }
+  var r = arg1;
+  if (!IS_NUMBER(r)) r = ToNumber(r);
+  if (NUMBER_IS_NAN(r)) return r;
+  for (var i = 1; i < length; i++) {
+    var n = %_Arguments(i);
+    if (!IS_NUMBER(n)) n = ToNumber(n);
     if (NUMBER_IS_NAN(n)) return n;
-    // Make sure +0 is considered greater than -0.
-    if (n > r || (r === 0 && n === 0 && !%_IsSmi(r))) r = n;
+    // Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be
+    // a Smi or heap number.
+    if (n > r || (r === 0 && n === 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
   }
   return r;
 }
 // ECMA 262 - 15.8.2.12
 function MathMin(arg1, arg2) {  // length == 2
-  var r = $Infinity;
   var length = %_ArgumentsLength();
-  for (var i = 0; i < length; i++) {
-    var n = ToNumber(%_Arguments(i));
+  if (length == 0) {
+    return 1/0;  // Compiler constant-folds this to Infinity.
+  }
+  var r = arg1;
+  if (!IS_NUMBER(r)) r = ToNumber(r);
+  if (NUMBER_IS_NAN(r)) return r;
+  for (var i = 1; i < length; i++) {
+    var n = %_Arguments(i);
+    if (!IS_NUMBER(n)) n = ToNumber(n);
     if (NUMBER_IS_NAN(n)) return n;
-    // Make sure -0 is considered less than +0.
-    if (n < r || (r === 0 && n === 0 && !%_IsSmi(n))) r = n;
+    // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can b a
+    // Smi or a heap number.
+    if (n < r || (r === 0 && n === 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
  }
  return r;
 }
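Aside: the new "1 / r < 0" term is what actually distinguishes -0 from +0, since -0 === 0 in JavaScript but 1/-0 is -Infinity. C++ doubles show the same IEEE-754 signed-zero behaviour, checked in a small standalone snippet:

#include <cmath>
#include <cstdio>

// -0.0 and +0.0 compare equal, so the portable ways to tell them apart are
// the sign of 1/x (the trick MathMax/MathMin use above) or std::signbit.
int main() {
  double pos = 0.0, neg = -0.0;
  std::printf("neg == pos: %d\n", neg == pos);                   // 1: equal
  std::printf("1/neg < 0: %d\n", 1.0 / neg < 0.0);               // 1: -Infinity
  std::printf("1/pos < 0: %d\n", 1.0 / pos < 0.0);               // 0: +Infinity
  std::printf("signbit(neg): %d\n", std::signbit(neg) ? 1 : 0);  // 1
}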

5
deps/v8/src/messages.js

@@ -157,6 +157,11 @@ function FormatMessage(message) {
       instanceof_nonobject_proto:   "Function has non-object prototype '%0' in instanceof check",
       null_to_object:               "Cannot convert null to object",
       reduce_no_initial:            "Reduce of empty array with no initial value",
+      getter_must_be_callable:      "Getter must be a function: %0",
+      setter_must_be_callable:      "Setter must be a function: %0",
+      value_and_accessor:           "Invalid property. A property cannot both have accessors and be writable or have a value: %0",
+      proto_object_or_null:         "Object prototype may only be an Object or null",
+      property_desc_object:         "Property description must be an object: %0",
       // RangeError
       invalid_array_length:         "Invalid array length",
       stack_overflow:               "Maximum call stack size exceeded",

12
deps/v8/src/objects-inl.h

@@ -952,14 +952,14 @@ MapWord MapWord::EncodeAddress(Address map_address, int offset) {
   // exceed the object area size of a page.
   ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
-  int compact_offset = offset >> kObjectAlignmentBits;
+  uintptr_t compact_offset = offset >> kObjectAlignmentBits;
   ASSERT(compact_offset < (1 << kForwardingOffsetBits));
   Page* map_page = Page::FromAddress(map_address);
   ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
-  int map_page_offset =
-      map_page->Offset(map_address) >> kObjectAlignmentBits;
+  uintptr_t map_page_offset =
+      map_page->Offset(map_address) >> kMapAlignmentBits;
   uintptr_t encoding =
       (compact_offset << kForwardingOffsetShift) |
@@ -975,8 +975,8 @@ Address MapWord::DecodeMapAddress(MapSpace* map_space) {
   ASSERT_MAP_PAGE_INDEX(map_page_index);
   int map_page_offset = static_cast<int>(
-      ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift)
-      << kObjectAlignmentBits);
+      ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift) <<
+      kMapAlignmentBits);
   return (map_space->PageAddress(map_page_index) + map_page_offset);
 }
@@ -1499,7 +1499,7 @@ void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
   // Range check.
   ASSERT(descriptor_number < number_of_descriptors());
-  // Make sure non of the elements in desc are in new space.
+  // Make sure none of the elements in desc are in new space.
   ASSERT(!Heap::InNewSpace(desc->GetKey()));
   ASSERT(!Heap::InNewSpace(desc->GetValue()));

14
deps/v8/src/objects.cc

@@ -1351,6 +1351,8 @@ Object* JSObject::AddFastProperty(String* name,
 Object* JSObject::AddConstantFunctionProperty(String* name,
                                               JSFunction* function,
                                               PropertyAttributes attributes) {
+  ASSERT(!Heap::InNewSpace(function));
   // Allocate new instance descriptors with (name, function) added
   ConstantFunctionDescriptor d(name, function, attributes);
   Object* new_descriptors =
@@ -1437,7 +1439,7 @@ Object* JSObject::AddProperty(String* name,
   // Ensure the descriptor array does not get too big.
   if (map()->instance_descriptors()->number_of_descriptors() <
       DescriptorArray::kMaxNumberOfDescriptors) {
-    if (value->IsJSFunction()) {
+    if (value->IsJSFunction() && !Heap::InNewSpace(value)) {
       return AddConstantFunctionProperty(name,
                                          JSFunction::cast(value),
                                          attributes);
@@ -3254,7 +3256,8 @@ Object* DescriptorArray::Allocate(int number_of_descriptors) {
     return Heap::empty_descriptor_array();
   }
   // Allocate the array of keys.
-  Object* array = Heap::AllocateFixedArray(ToKeyIndex(number_of_descriptors));
+  Object* array =
+      Heap::AllocateFixedArray(ToKeyIndex(number_of_descriptors));
   if (array->IsFailure()) return array;
   // Do not use DescriptorArray::cast on incomplete object.
   FixedArray* result = FixedArray::cast(array);
@@ -7962,7 +7965,10 @@ Object* StringDictionary::TransformPropertiesToFastFor(
       PropertyType type = DetailsAt(i).type();
       ASSERT(type != FIELD);
       instance_descriptor_length++;
-      if (type == NORMAL && !value->IsJSFunction()) number_of_fields += 1;
+      if (type == NORMAL &&
+          (!value->IsJSFunction() || Heap::InNewSpace(value))) {
+        number_of_fields += 1;
+      }
     }
   }
@@ -7993,7 +7999,7 @@ Object* StringDictionary::TransformPropertiesToFastFor(
       PropertyDetails details = DetailsAt(i);
       PropertyType type = details.type();
-      if (value->IsJSFunction()) {
+      if (value->IsJSFunction() && !Heap::InNewSpace(value)) {
         ConstantFunctionDescriptor d(String::cast(key),
                                      JSFunction::cast(value),
                                      details.attributes(),

47
deps/v8/src/objects.h

@@ -892,15 +892,25 @@ class MapWord BASE_EMBEDDED {
   static const int kOverflowBit = 1;  // overflow bit
   static const int kOverflowMask = (1 << kOverflowBit);  // overflow mask
-  // Forwarding pointers and map pointer encoding
-  //  31             21 20              10 9               0
+  // Forwarding pointers and map pointer encoding. On 32 bit all the bits are
+  // used.
   //  +-----------------+------------------+-----------------+
   //  |forwarding offset|page offset of map|page index of map|
   //  +-----------------+------------------+-----------------+
-  //   11 bits           11 bits            10 bits
-  static const int kMapPageIndexBits = 10;
-  static const int kMapPageOffsetBits = 11;
-  static const int kForwardingOffsetBits = 11;
+  //           ^                 ^                  ^
+  //           |                 |                  |
+  //           |                 |          kMapPageIndexBits
+  //           |         kMapPageOffsetBits
+  //   kForwardingOffsetBits
+  static const int kMapPageOffsetBits = kPageSizeBits - kMapAlignmentBits;
+  static const int kForwardingOffsetBits = kPageSizeBits - kObjectAlignmentBits;
+#ifdef V8_HOST_ARCH_64_BIT
+  static const int kMapPageIndexBits = 16;
+#else
+  // Use all the 32-bits to encode on a 32-bit platform.
+  static const int kMapPageIndexBits =
+      32 - (kMapPageOffsetBits + kForwardingOffsetBits);
+#endif
   static const int kMapPageIndexShift = 0;
   static const int kMapPageOffsetShift =
@@ -908,16 +918,12 @@ class MapWord BASE_EMBEDDED {
   static const int kForwardingOffsetShift =
       kMapPageOffsetShift + kMapPageOffsetBits;
-  // 0x000003FF
-  static const uint32_t kMapPageIndexMask =
+  // Bit masks covering the different parts the encoding.
+  static const uintptr_t kMapPageIndexMask =
       (1 << kMapPageOffsetShift) - 1;
-  // 0x001FFC00
-  static const uint32_t kMapPageOffsetMask =
+  static const uintptr_t kMapPageOffsetMask =
       ((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
-  // 0xFFE00000
-  static const uint32_t kForwardingOffsetMask =
+  static const uintptr_t kForwardingOffsetMask =
       ~(kMapPageIndexMask | kMapPageOffsetMask);
  private:
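Aside: the MapWord constants above just carve one uintptr_t into three bit fields (page index, page offset of the map, forwarding offset): shifts come from the widths of the fields below, masks from the shifts. A worked standalone example of the same pack/unpack arithmetic, with illustrative field widths (not V8's values):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Pack three fields into one word, low bits first, exactly as MapWord does.
const int kIndexBits = 10, kOffsetBits = 11;  // illustrative widths
const int kIndexShift = 0;
const int kOffsetShift = kIndexShift + kIndexBits;
const int kForwardShift = kOffsetShift + kOffsetBits;

const uintptr_t kIndexMask = (uintptr_t(1) << kOffsetShift) - 1;
const uintptr_t kOffsetMask = ((uintptr_t(1) << kForwardShift) - 1) & ~kIndexMask;

uintptr_t Encode(uintptr_t index, uintptr_t offset, uintptr_t forward) {
  assert(index < (uintptr_t(1) << kIndexBits));
  assert(offset < (uintptr_t(1) << kOffsetBits));
  return (forward << kForwardShift) | (offset << kOffsetShift) | index;
}

int main() {
  uintptr_t w = Encode(/*index=*/3, /*offset=*/5, /*forward=*/7);
  std::printf("index   = %lu\n", (unsigned long)(w & kIndexMask));                   // 3
  std::printf("offset  = %lu\n", (unsigned long)((w & kOffsetMask) >> kOffsetShift)); // 5
  std::printf("forward = %lu\n", (unsigned long)(w >> kForwardShift));               // 7
}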
@@ -1662,6 +1668,7 @@ class DescriptorArray: public FixedArray {
  public:
   // Is this the singleton empty_descriptor_array?
   inline bool IsEmpty();
+
   // Returns the number of descriptors in the array.
   int number_of_descriptors() {
     return IsEmpty() ? 0 : length() - kFirstIndex;
@@ -1801,12 +1808,14 @@ class DescriptorArray: public FixedArray {
   static int ToKeyIndex(int descriptor_number) {
     return descriptor_number+kFirstIndex;
   }
+  static int ToDetailsIndex(int descriptor_number) {
+    return (descriptor_number << 1) + 1;
+  }
   static int ToValueIndex(int descriptor_number) {
     return descriptor_number << 1;
   }
-  static int ToDetailsIndex(int descriptor_number) {
-    return (descriptor_number << 1) + 1;
-  }
   bool is_null_descriptor(int descriptor_number) {
     return PropertyDetails(GetDetails(descriptor_number)).type() ==
@@ -2838,7 +2847,6 @@ class Map: public HeapObject {
   // [stub cache]: contains stubs compiled for this map.
   DECL_ACCESSORS(code_cache, FixedArray)
-  // Returns a copy of the map.
   Object* CopyDropDescriptors();
   // Returns a copy of the map, with all transitions dropped from the
@@ -2906,7 +2914,8 @@ class Map: public HeapObject {
   static const int kInstanceDescriptorsOffset =
       kConstructorOffset + kPointerSize;
   static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
-  static const int kSize = kCodeCacheOffset + kPointerSize;
+  static const int kPadStart = kCodeCacheOffset + kPointerSize;
+  static const int kSize = MAP_SIZE_ALIGN(kPadStart);
   // Byte offsets within kInstanceSizesOffset.
   static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;

3
deps/v8/src/parser.cc

@@ -2657,6 +2657,9 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
     Expression* cond = NULL;
     if (peek() != Token::SEMICOLON) {
       cond = ParseExpression(true, CHECK_OK);
+      if (cond && cond->AsCompareOperation()) {
+        cond->AsCompareOperation()->set_is_for_loop_condition();
+      }
     }
     Expect(Token::SEMICOLON, CHECK_OK);

10
deps/v8/src/prettyprinter.cc

@@ -594,11 +594,11 @@ class IndentedScope BASE_EMBEDDED {
     ast_printer_->inc_indent();
   }
-  explicit IndentedScope(const char* txt, SmiAnalysis* type = NULL) {
+  explicit IndentedScope(const char* txt, StaticType* type = NULL) {
     ast_printer_->PrintIndented(txt);
     if ((type != NULL) && (type->IsKnown())) {
       ast_printer_->Print(" (type = ");
-      ast_printer_->Print(SmiAnalysis::Type2String(type));
+      ast_printer_->Print(StaticType::Type2String(type));
       ast_printer_->Print(")");
     }
     ast_printer_->Print("\n");
@@ -657,7 +657,7 @@ void AstPrinter::PrintLiteralIndented(const char* info,
 void AstPrinter::PrintLiteralWithModeIndented(const char* info,
                                               Variable* var,
                                               Handle<Object> value,
-                                              SmiAnalysis* type) {
+                                              StaticType* type) {
   if (var == NULL) {
     PrintLiteralIndented(info, value, true);
   } else {
@@ -665,7 +665,7 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
     if (type->IsKnown()) {
       OS::SNPrintF(buf, "%s (mode = %s, type = %s)", info,
                    Variable::Mode2String(var->mode()),
-                   SmiAnalysis::Type2String(type));
+                   StaticType::Type2String(type));
     } else {
       OS::SNPrintF(buf, "%s (mode = %s)", info,
                    Variable::Mode2String(var->mode()));
@@ -1072,7 +1072,7 @@ void AstPrinter::VisitCountOperation(CountOperation* node) {
     OS::SNPrintF(buf, "%s %s (type = %s)",
                  (node->is_prefix() ? "PRE" : "POST"),
                  Token::Name(node->op()),
-                 SmiAnalysis::Type2String(node->type()));
+                 StaticType::Type2String(node->type()));
   } else {
     OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
                  Token::Name(node->op()));

2
deps/v8/src/prettyprinter.h

@@ -102,7 +102,7 @@ class AstPrinter: public PrettyPrinter {
   void PrintLiteralWithModeIndented(const char* info,
                                     Variable* var,
                                     Handle<Object> value,
-                                    SmiAnalysis* type);
+                                    StaticType* type);
   void PrintLabelsIndented(const char* info, ZoneStringList* labels);

   void inc_indent() { indent_++; }

2
deps/v8/src/rewriter.cc

@@ -367,7 +367,7 @@ void AstOptimizer::VisitAssignment(Assignment* node) {
   if (proxy != NULL) {
     Variable* var = proxy->AsVariable();
     if (var != NULL) {
-      SmiAnalysis* var_type = var->type();
+      StaticType* var_type = var->type();
       if (var_type->IsUnknown()) {
         var_type->CopyFrom(node->type());
       } else if (var_type->IsLikelySmi()) {

105
deps/v8/src/runtime.cc

@@ -398,6 +398,82 @@ static Object* Runtime_CreateArrayLiteralBoilerplate(Arguments args) {
 }

+static Object* Runtime_CreateObjectLiteral(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
+
+  // Check if boilerplate exists. If not, create it first.
+  Handle<Object> boilerplate(literals->get(literals_index));
+  if (*boilerplate == Heap::undefined_value()) {
+    boilerplate = CreateObjectLiteralBoilerplate(literals, constant_properties);
+    if (boilerplate.is_null()) return Failure::Exception();
+    // Update the functions literal and return the boilerplate.
+    literals->set(literals_index, *boilerplate);
+  }
+  return DeepCopyBoilerplate(JSObject::cast(*boilerplate));
+}
+
+static Object* Runtime_CreateObjectLiteralShallow(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
+
+  // Check if boilerplate exists. If not, create it first.
+  Handle<Object> boilerplate(literals->get(literals_index));
+  if (*boilerplate == Heap::undefined_value()) {
+    boilerplate = CreateObjectLiteralBoilerplate(literals, constant_properties);
+    if (boilerplate.is_null()) return Failure::Exception();
+    // Update the functions literal and return the boilerplate.
+    literals->set(literals_index, *boilerplate);
+  }
+  return Heap::CopyJSObject(JSObject::cast(*boilerplate));
+}
+
+static Object* Runtime_CreateArrayLiteral(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_ARG_CHECKED(FixedArray, elements, 2);
+
+  // Check if boilerplate exists. If not, create it first.
+  Handle<Object> boilerplate(literals->get(literals_index));
+  if (*boilerplate == Heap::undefined_value()) {
+    boilerplate = CreateArrayLiteralBoilerplate(literals, elements);
+    if (boilerplate.is_null()) return Failure::Exception();
+    // Update the functions literal and return the boilerplate.
+    literals->set(literals_index, *boilerplate);
+  }
+  return DeepCopyBoilerplate(JSObject::cast(*boilerplate));
+}
+
+static Object* Runtime_CreateArrayLiteralShallow(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_SMI_CHECKED(literals_index, args[1]);
+  CONVERT_ARG_CHECKED(FixedArray, elements, 2);
+
+  // Check if boilerplate exists. If not, create it first.
+  Handle<Object> boilerplate(literals->get(literals_index));
+  if (*boilerplate == Heap::undefined_value()) {
+    boilerplate = CreateArrayLiteralBoilerplate(literals, elements);
+    if (boilerplate.is_null()) return Failure::Exception();
+    // Update the functions literal and return the boilerplate.
+    literals->set(literals_index, *boilerplate);
+  }
+  return Heap::CopyJSObject(JSObject::cast(*boilerplate));
+}
+
 static Object* Runtime_CreateCatchExtensionObject(Arguments args) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, key, args[0]);
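Note: the four new runtime functions above share one lazy-caching shape — look up the per-function literals slot, build the boilerplate on first use, cache it, then hand back a copy. A minimal standalone sketch of that shape (plain C++ with stand-in types, not V8 API):

#include <memory>
#include <vector>

struct Obj { int payload; };
// One cache slot per literal site in the function, as in V8's literals array.
using Literals = std::vector<std::shared_ptr<Obj>>;

std::shared_ptr<Obj> MakeBoilerplate() { return std::make_shared<Obj>(Obj{42}); }
std::shared_ptr<Obj> Copy(const Obj& o) { return std::make_shared<Obj>(o); }

std::shared_ptr<Obj> CreateLiteral(Literals& literals, int index) {
  // Check if the boilerplate exists; if not, create and cache it first.
  if (!literals[index]) {
    literals[index] = MakeBoilerplate();
  }
  // Every evaluation of the literal site returns a fresh copy.
  return Copy(*literals[index]);
}

The deep/shallow split above only changes the final copy step: DeepCopyBoilerplate recursively copies nested literals, while Heap::CopyJSObject clones just the outer object.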
@@ -644,7 +720,7 @@ static Object* Runtime_DeclareGlobals(Arguments args) {
       // Copy the function and update its context. Use it as value.
       Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(value);
       Handle<JSFunction> function =
-          Factory::NewFunctionFromBoilerplate(boilerplate, context);
+          Factory::NewFunctionFromBoilerplate(boilerplate, context, TENURED);
       value = function;
     }
@@ -719,12 +795,15 @@ static Object* Runtime_DeclareContextSlot(Arguments args) {
   if (*initial_value != NULL) {
     if (index >= 0) {
       // The variable or constant context slot should always be in
-      // the function context; not in any outer context nor in the
-      // arguments object.
-      ASSERT(holder.is_identical_to(context));
-      if (((attributes & READ_ONLY) == 0) ||
-          context->get(index)->IsTheHole()) {
-        context->set(index, *initial_value);
+      // the function context or the arguments object.
+      if (holder->IsContext()) {
+        ASSERT(holder.is_identical_to(context));
+        if (((attributes & READ_ONLY) == 0) ||
+            context->get(index)->IsTheHole()) {
+          context->set(index, *initial_value);
+        }
+      } else {
+        Handle<JSObject>::cast(holder)->SetElement(index, *initial_value);
       }
     } else {
       // Slow case: The property is not in the FixedArray part of the context.
@@ -4423,8 +4502,11 @@ static Object* Runtime_NewClosure(Arguments args) {
   CONVERT_ARG_CHECKED(Context, context, 0);
   CONVERT_ARG_CHECKED(JSFunction, boilerplate, 1);

+  PretenureFlag pretenure = (context->global_context() == *context)
+      ? TENURED       // Allocate global closures in old space.
+      : NOT_TENURED;  // Allocate local closures in new space.
   Handle<JSFunction> result =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+      Factory::NewFunctionFromBoilerplate(boilerplate, context, pretenure);
   return *result;
 }
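Note: closures created at global scope now skip the young generation entirely. A toy restatement of the decision (plain C++ with an invented Context stand-in, not V8's allocator):

// Sketch of the pretenuring choice in Runtime_NewClosure above: closures
// created in the global context tend to live long, so allocate them TENURED
// (old space); local closures stay NOT_TENURED (new space, cheap to collect).
enum PretenureFlag { NOT_TENURED, TENURED };

struct Context { const Context* global; };  // stand-in, not V8's Context

PretenureFlag ChoosePretenure(const Context* ctx) {
  return (ctx->global == ctx) ? TENURED : NOT_TENURED;
}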
@@ -5140,7 +5222,7 @@ static Object* Runtime_CompileString(Arguments args) {
                                                          validate);
   if (boilerplate.is_null()) return Failure::Exception();
   Handle<JSFunction> fun =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+      Factory::NewFunctionFromBoilerplate(boilerplate, context, NOT_TENURED);
   return *fun;
 }
@@ -5168,7 +5250,7 @@ static Object* CompileDirectEval(Handle<String> source) {
                                   Compiler::DONT_VALIDATE_JSON);
   if (boilerplate.is_null()) return Failure::Exception();
   Handle<JSFunction> fun =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+      Factory::NewFunctionFromBoilerplate(boilerplate, context, NOT_TENURED);
   return *fun;
 }
@@ -7805,7 +7887,8 @@ static Object* Runtime_CollectStackTrace(Arguments args) {
   HandleScope scope;

-  int initial_size = limit < 10 ? limit : 10;
+  limit = Max(limit, 0);  // Ensure that limit is not negative.
+  int initial_size = Min(limit, 10);
   Handle<JSArray> result = Factory::NewJSArray(initial_size * 3);

   StackFrameIterator iter;

4
deps/v8/src/runtime.h

@@ -223,6 +223,10 @@ namespace internal {
   F(CreateObjectLiteralBoilerplate, 3, 1) \
   F(CloneLiteralBoilerplate, 1, 1) \
   F(CloneShallowLiteralBoilerplate, 1, 1) \
+  F(CreateObjectLiteral, 3, 1) \
+  F(CreateObjectLiteralShallow, 3, 1) \
+  F(CreateArrayLiteral, 3, 1) \
+  F(CreateArrayLiteralShallow, 3, 1) \
   \
   /* Catch context extension objects */ \
   F(CreateCatchExtensionObject, 2, 1) \

6
deps/v8/src/runtime.js

@@ -122,6 +122,12 @@ function COMPARE(x, ncr) {
     return %StringCompare(this, x);
   }

+  // If one of the operands is undefined, it will convert to NaN and
+  // thus the result should be as if one of the operands was NaN.
+  if (IS_UNDEFINED(this) || IS_UNDEFINED(x)) {
+    return ncr;
+  }
+
   // Default implementation.
   var a = %ToPrimitive(this, NUMBER_HINT);
   var b = %ToPrimitive(x, NUMBER_HINT);
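Note: the early-out above mirrors IEEE 754 — undefined converts to NaN, and every ordered comparison against NaN is false, so COMPARE can answer with its not-comparable result (ncr) immediately. The same behavior stated in C++ terms:

#include <cassert>
#include <cmath>

int main() {
  double nan = std::nan("");
  // Any ordered comparison involving NaN is false, and NaN != NaN.
  assert(!(nan < 1.0) && !(nan > 1.0) && !(nan == nan));
  return 0;
}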

3
deps/v8/src/scopes.cc

@@ -189,8 +189,7 @@ void Scope::Initialize(bool inside_with) {
   variables_.Declare(this, Factory::this_symbol(), Variable::VAR,
                      false, Variable::THIS);
   var->rewrite_ = new Slot(var, Slot::PARAMETER, -1);
-  receiver_ = new VariableProxy(Factory::this_symbol(), true, false);
-  receiver_->BindTo(var);
+  receiver_ = var;

   if (is_function_scope()) {
     // Declare 'arguments' variable which exists in all functions.

11
deps/v8/src/scopes.h

@@ -206,8 +206,13 @@ class Scope: public ZoneObject {
   // ---------------------------------------------------------------------------
   // Accessors.

-  // The variable corresponding to the (function) receiver.
-  VariableProxy* receiver() const { return receiver_; }
+  // A new variable proxy corresponding to the (function) receiver.
+  VariableProxy* receiver() const {
+    VariableProxy* proxy =
+        new VariableProxy(Factory::this_symbol(), true, false);
+    proxy->BindTo(receiver_);
+    return proxy;
+  }

   // The variable holding the function literal for named function
   // literals, or NULL.

@@ -314,7 +319,7 @@ class Scope: public ZoneObject {
   // Declarations.
   ZoneList<Declaration*> decls_;
   // Convenience variable.
-  VariableProxy* receiver_;
+  Variable* receiver_;
   // Function variable, if any; function scopes only.
   Variable* function_;
   // Convenience variable; function scopes only.
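Note: handing out a freshly allocated VariableProxy per call keeps AST nodes single-use — two call sites can no longer mutate one shared proxy. A minimal sketch of the accessor pattern (plain C++, invented types, not V8 source; V8 allocates proxies in a zone rather than leaking them):

struct Variable { const char* name; };

struct VariableProxy {
  const Variable* binding = nullptr;
  void BindTo(const Variable* var) { binding = var; }
};

struct Scope {
  const Variable* receiver_;  // was: a single shared VariableProxy*
  VariableProxy* receiver() const {
    VariableProxy* proxy = new VariableProxy();  // fresh node on every use
    proxy->BindTo(receiver_);
    return proxy;
  }
};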

22
deps/v8/src/serialize.cc

@@ -55,9 +55,8 @@ class SerializationAddressMapper {
   static int MappedTo(HeapObject* obj) {
     ASSERT(IsMapped(obj));
-    return reinterpret_cast<intptr_t>(serialization_map_->Lookup(Key(obj),
-                                                                 Hash(obj),
-                                                                 false)->value);
+    return static_cast<int>(reinterpret_cast<intptr_t>(
+        serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
   }

   static void Map(HeapObject* obj, int to) {

@@ -81,7 +80,7 @@ class SerializationAddressMapper {
   }

   static uint32_t Hash(HeapObject* obj) {
-    return reinterpret_cast<intptr_t>(obj->address());
+    return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
   }

   static void* Key(HeapObject* obj) {

@@ -485,6 +484,15 @@ void ExternalReferenceTable::PopulateTable() {
       21,
       "NativeRegExpMacroAssembler::GrowStack()");
 #endif
+  // Keyed lookup cache.
+  Add(ExternalReference::keyed_lookup_cache_keys().address(),
+      UNCLASSIFIED,
+      22,
+      "KeyedLookupCache::keys()");
+  Add(ExternalReference::keyed_lookup_cache_field_offsets().address(),
+      UNCLASSIFIED,
+      23,
+      "KeyedLookupCache::field_offsets()");
 }

@@ -624,7 +632,7 @@ HeapObject* Deserializer::GetAddressFromStart(int space) {
     return HeapObject::FromAddress(pages_[space][0] + offset);
   }
   ASSERT(SpaceIsPaged(space));
-  int page_of_pointee = offset >> Page::kPageSizeBits;
+  int page_of_pointee = offset >> kPageSizeBits;
   Address object_address = pages_[space][page_of_pointee] +
                            (offset & Page::kPageAlignmentMask);
   return HeapObject::FromAddress(object_address);

@@ -964,8 +972,8 @@ void Serializer::SerializeObject(
     int offset = CurrentAllocationAddress(space) - address;
     bool from_start = true;
     if (SpaceIsPaged(space)) {
-      if ((CurrentAllocationAddress(space) >> Page::kPageSizeBits) ==
-          (address >> Page::kPageSizeBits)) {
+      if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
+          (address >> kPageSizeBits)) {
        from_start = false;
        address = offset;
       }
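Note: both casts above silence 64-bit truncation warnings by making the narrowing explicit — the pointer is widened to intptr_t first, then deliberately truncated. In isolation:

#include <stdint.h>

// Explicit two-step narrowing, as in the serializer fixes above:
// pointer -> intptr_t -> 32-bit value. The truncation is intentional;
// a hash only needs a well-mixed 32-bit view of the address.
uint32_t HashAddress(const void* p) {
  return static_cast<uint32_t>(reinterpret_cast<intptr_t>(p));
}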

4
deps/v8/src/spaces.cc

@@ -398,7 +398,7 @@ static int PagesInChunk(Address start, size_t size) {
   // start+size. Page::kPageSize is a power of two so we can divide by
   // shifting.
   return static_cast<int>((RoundDown(start + size, Page::kPageSize)
-      - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits);
+      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
 }

@@ -412,7 +412,7 @@ Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
   if (size_ + static_cast<int>(chunk_size) > capacity_) {
     // Request as many pages as we can.
     chunk_size = capacity_ - size_;
-    requested_pages = static_cast<int>(chunk_size >> Page::kPageSizeBits);
+    requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);
     if (requested_pages <= 0) return Page::FromAddress(NULL);
   }

27
deps/v8/src/spaces.h

@@ -65,20 +65,23 @@ namespace internal {
 // Some assertion macros used in the debugging mode.

 #define ASSERT_PAGE_ALIGNED(address) \
   ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

 #define ASSERT_OBJECT_ALIGNED(address) \
   ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

+#define ASSERT_MAP_ALIGNED(address) \
+  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
+
 #define ASSERT_OBJECT_SIZE(size) \
   ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))

 #define ASSERT_PAGE_OFFSET(offset) \
   ASSERT((Page::kObjectStartOffset <= offset) \
       && (offset <= Page::kPageSize))

 #define ASSERT_MAP_PAGE_INDEX(index) \
   ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))

@@ -106,11 +109,8 @@ class AllocationInfo;
 // For this reason we add an offset to get room for the Page data at the start.
 //
 // The mark-compact collector transforms a map pointer into a page index and a
-// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
-// 8K) in total. Because a map pointer is aligned to the pointer size (4
-// bytes), 11 bits are enough to encode the page offset. 21 bits (10 for the
-// page index + 11 for the offset in the page) are required to encode a map
-// pointer.
+// page offset. The excact encoding is described in the comments for
+// class MapWord in objects.h.
 //
 // The only way to get a page pointer is by calling factory methods:
 //   Page* p = Page::FromAddress(addr); or

@@ -212,9 +212,6 @@ class Page {
   static void set_rset_state(RSetState state) { rset_state_ = state; }
 #endif

-  // 8K bytes per page.
-  static const int kPageSizeBits = 13;
-
   // Page size in bytes. This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;

@@ -514,7 +511,7 @@ class MemoryAllocator : public AllStatic {
 #endif

   // Due to encoding limitation, we can only have 8K chunks.
-  static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
+  static const int kMaxNofChunks = 1 << kPageSizeBits;
   // If a chunk has at least 16 pages, the maximum heap size is about
   // 8K * 8K * 16 = 1G bytes.
 #ifdef V8_TARGET_ARCH_X64
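Note: with kPageSizeBits hoisted out of Page into globals.h, page arithmetic is plain shift-and-mask everywhere. For the 8K pages used here:

#include <stdint.h>
#include <cassert>

const int kPageSizeBits = 13;             // 8K bytes per page
const int kPageSize = 1 << kPageSizeBits;
const uintptr_t kPageAlignmentMask = kPageSize - 1;

int main() {
  uintptr_t address = 5 * kPageSize + 200;
  assert((address >> kPageSizeBits) == 5);        // page index
  assert((address & kPageAlignmentMask) == 200);  // offset within the page
  return 0;
}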

4
deps/v8/src/stub-cache.cc

@@ -120,7 +120,7 @@ Object* StubCache::ComputeLoadCallback(String* name,
   Object* code = receiver->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     LoadStubCompiler compiler;
-    code = compiler.CompileLoadCallback(receiver, holder, callback, name);
+    code = compiler.CompileLoadCallback(name, receiver, holder, callback);
     if (code->IsFailure()) return code;
     LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));

@@ -831,7 +831,7 @@ static Object* ThrowReferenceError(String* name) {
   // can't use either LoadIC or KeyedLoadIC constructors.
   IC ic(IC::NO_EXTRA_FRAME);
   ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
-  if (!ic.is_contextual()) return Heap::undefined_value();
+  if (!ic.SlowIsContextual()) return Heap::undefined_value();

   // Throw a reference error.
   HandleScope scope;

11
deps/v8/src/stub-cache.h

@@ -405,7 +405,7 @@ class StubCompiler BASE_EMBEDDED {
                               String* name,
                               Label* miss);

-  void GenerateLoadCallback(JSObject* object,
+  bool GenerateLoadCallback(JSObject* object,
                             JSObject* holder,
                             Register receiver,
                             Register name_reg,

@@ -413,7 +413,8 @@ class StubCompiler BASE_EMBEDDED {
                             Register scratch2,
                             AccessorInfo* callback,
                             String* name,
-                            Label* miss);
+                            Label* miss,
+                            Failure** failure);

   void GenerateLoadConstant(JSObject* object,
                             JSObject* holder,

@@ -447,10 +448,10 @@ class LoadStubCompiler: public StubCompiler {
                            JSObject* holder,
                            int index,
                            String* name);
-  Object* CompileLoadCallback(JSObject* object,
+  Object* CompileLoadCallback(String* name,
+                              JSObject* object,
                               JSObject* holder,
-                              AccessorInfo* callback,
-                              String* name);
+                              AccessorInfo* callback);
   Object* CompileLoadConstant(JSObject* object,
                               JSObject* holder,
                               Object* value,

2
deps/v8/src/token.cc

@@ -32,13 +32,11 @@
 namespace v8 {
 namespace internal {

-#ifdef DEBUG
 #define T(name, string, precedence) #name,
 const char* Token::name_[NUM_TOKENS] = {
   TOKEN_LIST(T, T, IGNORE_TOKEN)
 };
 #undef T
-#endif

 #define T(name, string, precedence) string,

9
deps/v8/src/token.h

@@ -66,8 +66,9 @@ namespace internal {
   T(DEC, "--", 0) \
   \
   /* Assignment operators. */ \
-  /* IsAssignmentOp() relies on this block of enum values */ \
-  /* being contiguous and sorted in the same order! */ \
+  /* IsAssignmentOp() and Assignment::is_compound() relies on */ \
+  /* this block of enum values being contiguous and sorted in the */ \
+  /* same order! */ \
   T(INIT_VAR, "=init_var", 2) /* AST-use only. */ \
   T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \
   T(ASSIGN, "=", 2) \

@@ -211,14 +212,12 @@ class Token {
   };
 #undef T

-#ifdef DEBUG
   // Returns a string corresponding to the C++ token name
   // (e.g. "LT" for the token LT).
   static const char* Name(Value tok) {
     ASSERT(0 <= tok && tok < NUM_TOKENS);
     return name_[tok];
   }
-#endif

   // Predicates

   static bool IsAssignmentOp(Value tok) {

@@ -261,9 +260,7 @@ class Token {
   }

  private:
-#ifdef DEBUG
   static const char* name_[NUM_TOKENS];
-#endif
   static const char* string_[NUM_TOKENS];
   static int8_t precedence_[NUM_TOKENS];
 };

2
deps/v8/src/v8-counters.h

@@ -74,8 +74,6 @@ namespace internal {
   SC(objs_since_last_full, V8.ObjsSinceLastFull) \
   SC(symbol_table_capacity, V8.SymbolTableCapacity) \
   SC(number_of_symbols, V8.NumberOfSymbols) \
-  /* Current amount of memory in external string buffers. */ \
-  SC(total_external_string_memory, V8.TotalExternalStringMemory) \
   SC(script_wrappers, V8.ScriptWrappers) \
   SC(call_initialize_stubs, V8.CallInitializeStubs) \
   SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \

207
deps/v8/src/v8natives.js

@@ -41,6 +41,7 @@
 const $isNaN = GlobalIsNaN;
 const $isFinite = GlobalIsFinite;

 // ----------------------------------------------------------------------------

@@ -87,7 +88,7 @@ function GlobalIsFinite(number) {

 // ECMA-262 - 15.1.2.2
 function GlobalParseInt(string, radix) {
-  if (radix === void 0) {
+  if (IS_UNDEFINED(radix)) {
     // Some people use parseInt instead of Math.floor. This
     // optimization makes parseInt on a Smi 12 times faster (60ns
     // vs 800ns). The following optimization makes parseInt on a

@@ -280,6 +281,207 @@ function ObjectKeys(obj) {
 }

+// ES5 8.10.1.
+function IsAccessorDescriptor(desc) {
+  if (IS_UNDEFINED(desc)) return false;
+  return desc.hasGetter_ || desc.hasSetter_;
+}
+
+// ES5 8.10.2.
+function IsDataDescriptor(desc) {
+  if (IS_UNDEFINED(desc)) return false;
+  return desc.hasValue_ || desc.hasWritable_;
+}
+
+// ES5 8.10.3.
+function IsGenericDescriptor(desc) {
+  return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
+}
+
+function IsInconsistentDescriptor(desc) {
+  return IsAccessorDescriptor(desc) && IsDataDescriptor(desc);
+}
+
+// ES5 8.10.5.
+function ToPropertyDescriptor(obj) {
+  if (!IS_OBJECT(obj)) {
+    throw MakeTypeError("property_desc_object", [obj]);
+  }
+  var desc = new PropertyDescriptor();
+
+  if ("enumerable" in obj) {
+    desc.setEnumerable(ToBoolean(obj.enumerable));
+  }
+
+  if ("configurable" in obj) {
+    desc.setConfigurable(ToBoolean(obj.configurable));
+  }
+
+  if ("value" in obj) {
+    desc.setValue(obj.value);
+  }
+
+  if ("writable" in obj) {
+    desc.setWritable(ToBoolean(obj.writable));
+  }
+
+  if ("get" in obj) {
+    var get = obj.get;
+    if (!IS_UNDEFINED(get) && !IS_FUNCTION(get)) {
+      throw MakeTypeError("getter_must_be_callable", [get]);
+    }
+    desc.setGet(get);
+  }
+
+  if ("set" in obj) {
+    var set = obj.set;
+    if (!IS_UNDEFINED(set) && !IS_FUNCTION(set)) {
+      throw MakeTypeError("setter_must_be_callable", [set]);
+    }
+    desc.setSet(set);
+  }
+
+  if (IsInconsistentDescriptor(desc)) {
+    throw MakeTypeError("value_and_accessor", [obj]);
+  }
+  return desc;
+}
+
+function PropertyDescriptor() {
+  // Initialize here so they are all in-object and have the same map.
+  // Default values from ES5 8.6.1.
+  this.value_ = void 0;
+  this.hasValue_ = false;
+  this.writable_ = false;
+  this.hasWritable_ = false;
+  this.enumerable_ = false;
+  this.configurable_ = false;
+  this.get_ = void 0;
+  this.hasGetter_ = false;
+  this.set_ = void 0;
+  this.hasSetter_ = false;
+}
+
+PropertyDescriptor.prototype.setValue = function(value) {
+  this.value_ = value;
+  this.hasValue_ = true;
+}
+
+PropertyDescriptor.prototype.getValue = function() {
+  return this.value_;
+}
+
+PropertyDescriptor.prototype.setEnumerable = function(enumerable) {
+  this.enumerable_ = enumerable;
+}
+
+PropertyDescriptor.prototype.isEnumerable = function () {
+  return this.enumerable_;
+}
+
+PropertyDescriptor.prototype.setWritable = function(writable) {
+  this.writable_ = writable;
+  this.hasWritable_ = true;
+}
+
+PropertyDescriptor.prototype.isWritable = function() {
+  return this.writable_;
+}
+
+PropertyDescriptor.prototype.setConfigurable = function(configurable) {
+  this.configurable_ = configurable;
+}
+
+PropertyDescriptor.prototype.isConfigurable = function() {
+  return this.configurable_;
+}
+
+PropertyDescriptor.prototype.setGet = function(get) {
+  this.get_ = get;
+  this.hasGetter_ = true;
+}
+
+PropertyDescriptor.prototype.getGet = function() {
+  return this.get_;
+}
+
+PropertyDescriptor.prototype.setSet = function(set) {
+  this.set_ = set;
+  this.hasSetter_ = true;
+}
+
+PropertyDescriptor.prototype.getSet = function() {
+  return this.set_;
+}
+
+// ES5 8.12.9. This version cannot cope with the property p already
+// being present on obj.
+function DefineOwnProperty(obj, p, desc, should_throw) {
+  var flag = desc.isEnumerable() ? 0 : DONT_ENUM;
+  if (IsDataDescriptor(desc)) {
+    flag |= desc.isWritable() ? 0 : (DONT_DELETE | READ_ONLY);
+    %SetProperty(obj, p, desc.getValue(), flag);
+  } else {
+    if (IS_FUNCTION(desc.getGet())) %DefineAccessor(obj, p, GETTER, desc.getGet(), flag);
+    if (IS_FUNCTION(desc.getSet())) %DefineAccessor(obj, p, SETTER, desc.getSet(), flag);
+  }
+  return true;
+}
+
+// ES5 section 15.2.3.5.
+function ObjectCreate(proto, properties) {
+  if (!IS_OBJECT(proto) && !IS_NULL(proto)) {
+    throw MakeTypeError("proto_object_or_null", [proto]);
+  }
+  var obj = new $Object();
+  obj.__proto__ = proto;
+  if (!IS_UNDEFINED(properties)) ObjectDefineProperties(obj, properties);
+  return obj;
+}
+
+// ES5 section 15.2.3.7. This version cannot cope with the properies already
+// being present on obj. Therefore it is not exposed as
+// Object.defineProperties yet.
+function ObjectDefineProperties(obj, properties) {
+  var props = ToObject(properties);
+  var key_values = [];
+  for (var key in props) {
+    if (%HasLocalProperty(props, key)) {
+      key_values.push(key);
+      var value = props[key];
+      var desc = ToPropertyDescriptor(value);
+      key_values.push(desc);
+    }
+  }
+  for (var i = 0; i < key_values.length; i += 2) {
+    var key = key_values[i];
+    var desc = key_values[i + 1];
+    DefineOwnProperty(obj, key, desc, true);
+  }
+}
+
 %SetCode($Object, function(x) {
   if (%_IsConstructCall()) {
     if (x == null) return this;

@@ -309,7 +511,8 @@ function SetupObject() {
     "__lookupSetter__", ObjectLookupSetter
   ));
   InstallFunctions($Object, DONT_ENUM, $Array(
-    "keys", ObjectKeys
+    "keys", ObjectKeys,
+    "create", ObjectCreate
   ));
 }
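Note: the descriptor plumbing above classifies a descriptor as data (value/writable present), accessor (get/set present), or generic, and rejects one that is both. The same classification, restated as a standalone C++ sketch (not the JS above, field names invented):

// ES5 8.10.1-8.10.3 classification as used by ToPropertyDescriptor above.
struct PropertyDescriptor {
  bool has_value = false, has_writable = false;  // data side
  bool has_getter = false, has_setter = false;   // accessor side
};

bool IsAccessorDescriptor(const PropertyDescriptor& d) {
  return d.has_getter || d.has_setter;
}
bool IsDataDescriptor(const PropertyDescriptor& d) {
  return d.has_value || d.has_writable;
}
bool IsGenericDescriptor(const PropertyDescriptor& d) {
  return !IsAccessorDescriptor(d) && !IsDataDescriptor(d);
}
// A descriptor naming both a value and an accessor is malformed (8.10.5),
// which is what MakeTypeError("value_and_accessor", ...) reports above.
bool IsInconsistentDescriptor(const PropertyDescriptor& d) {
  return IsAccessorDescriptor(d) && IsDataDescriptor(d);
}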

4
deps/v8/src/variables.cc

@@ -86,10 +86,10 @@ void UseCount::Print() {

 // ----------------------------------------------------------------------------
-// Implementation SmiAnalysis.
+// Implementation StaticType.

-const char* SmiAnalysis::Type2String(SmiAnalysis* type) {
+const char* StaticType::Type2String(StaticType* type) {
   switch (type->kind_) {
     case UNKNOWN:
       return "UNKNOWN";

14
deps/v8/src/variables.h

@@ -65,14 +65,14 @@ class UseCount BASE_EMBEDDED {

 // Variables and AST expression nodes can track their "type" to enable
 // optimizations and removal of redundant checks when generating code.

-class SmiAnalysis {
+class StaticType {
  public:
   enum Kind {
     UNKNOWN,
     LIKELY_SMI
   };

-  SmiAnalysis() : kind_(UNKNOWN) {}
+  StaticType() : kind_(UNKNOWN) {}

   bool Is(Kind kind) const { return kind_ == kind; }

@@ -80,11 +80,11 @@ class StaticType {
   bool IsUnknown() const { return Is(UNKNOWN); }
   bool IsLikelySmi() const { return Is(LIKELY_SMI); }

-  void CopyFrom(SmiAnalysis* other) {
+  void CopyFrom(StaticType* other) {
     kind_ = other->kind_;
   }

-  static const char* Type2String(SmiAnalysis* type);
+  static const char* Type2String(StaticType* type);

   // LIKELY_SMI accessors
   void SetAsLikelySmi() {

@@ -100,7 +100,7 @@ class StaticType {
  private:
   Kind kind_;

-  DISALLOW_COPY_AND_ASSIGN(SmiAnalysis);
+  DISALLOW_COPY_AND_ASSIGN(StaticType);
 };

@@ -203,7 +203,7 @@ class Variable: public ZoneObject {
   Expression* rewrite() const { return rewrite_; }
   Slot* slot() const;

-  SmiAnalysis* type() { return &type_; }
+  StaticType* type() { return &type_; }

  private:
   Scope* scope_;

@@ -220,7 +220,7 @@ class Variable: public ZoneObject {
   UseCount obj_uses_;  // uses of the object the variable points to

   // Static type information
-  SmiAnalysis type_;
+  StaticType type_;

   // Code generation.
   // rewrite_ is usually a Slot or a Property, but may be any expression.

2
deps/v8/src/version.cc

@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION 2
 #define MINOR_VERSION 0
-#define BUILD_NUMBER 3
+#define BUILD_NUMBER 5
 #define PATCH_LEVEL 0
 #define CANDIDATE_VERSION false

276
deps/v8/src/x64/codegen-x64.cc

@@ -4051,7 +4051,8 @@ void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
   Load(args->at(0));
   Load(args->at(1));

-  Result answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+  StringAddStub stub(NO_STRING_ADD_FLAGS);
+  Result answer = frame_->CallStub(&stub, 2);
   frame_->Push(&answer);
 }

@@ -5126,7 +5127,7 @@ void DeferredInlineBinaryOperation::Generate() {

 void CodeGenerator::GenericBinaryOperation(Token::Value op,
-                                           SmiAnalysis* type,
+                                           StaticType* type,
                                            OverwriteMode overwrite_mode) {
   Comment cmnt(masm_, "[ BinaryOperation");
   Comment cmnt_token(masm_, Token::String(op));

@@ -5315,7 +5316,7 @@ void DeferredInlineSmiOperation::Generate() {
 void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
                                                Result* operand,
                                                Handle<Object> value,
-                                               SmiAnalysis* type,
+                                               StaticType* type,
                                                bool reversed,
                                                OverwriteMode overwrite_mode) {
   // NOTE: This is an attempt to inline (a bit) more of the code for

@@ -6098,7 +6099,7 @@ void Reference::SetValue(InitState init_state) {
     // a loop and the key is likely to be a smi.
     Property* property = expression()->AsProperty();
     ASSERT(property != NULL);
-    SmiAnalysis* key_smi_analysis = property->key()->type();
+    StaticType* key_smi_analysis = property->key()->type();

     if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
       Comment cmnt(masm, "[ Inlined store to keyed Property");
@@ -7371,19 +7372,28 @@ void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,

 const char* GenericBinaryOpStub::GetName() {
-  switch (op_) {
-    case Token::ADD: return "GenericBinaryOpStub_ADD";
-    case Token::SUB: return "GenericBinaryOpStub_SUB";
-    case Token::MUL: return "GenericBinaryOpStub_MUL";
-    case Token::DIV: return "GenericBinaryOpStub_DIV";
-    case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
-    case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
-    case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
-    case Token::SAR: return "GenericBinaryOpStub_SAR";
-    case Token::SHL: return "GenericBinaryOpStub_SHL";
-    case Token::SHR: return "GenericBinaryOpStub_SHR";
-    default: return "GenericBinaryOpStub";
-  }
+  if (name_ != NULL) return name_;
+  const int len = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(len);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name;
+  switch (mode_) {
+    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+    default: overwrite_name = "UnknownOverwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, len),
+               "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
+               op_name,
+               overwrite_name,
+               (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
+               args_in_registers_ ? "RegArgs" : "StackArgs",
+               args_reversed_ ? "_R" : "",
+               use_sse3_ ? "SSE3" : "SSE2");
+  return name_;
 }
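Note: instead of one fixed string per token, the stub name is now composed from every field that feeds the stub's specialization, so differently specialized stubs no longer share a name in profiles. A reduced sketch of the formatting (plain C++ with illustrative fields, not the V8 code above):

#include <cstdio>

// Compose a stub name from its specialization flags, in the spirit of
// GenericBinaryOpStub::GetName() above. The caller owns buf; V8 instead
// caches the string in the stub's name_ field.
const char* StubName(char* buf, int len, const char* op,
                     const char* overwrite, bool in_regs, bool sse3) {
  std::snprintf(buf, len, "GenericBinaryOpStub_%s_%s_%s_%s",
                op, overwrite, in_regs ? "RegArgs" : "StackArgs",
                sse3 ? "SSE3" : "SSE2");
  return buf;
}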
@@ -7796,8 +7806,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
     __ j(above_equal, &string1);

     // First and second argument are strings.
-    Runtime::Function* f = Runtime::FunctionForId(Runtime::kStringAdd);
-    __ TailCallRuntime(ExternalReference(f), 2, f->result_size);
+    StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+    __ TailCallStub(&stub);

     // Only first argument is a string.
     __ bind(&string1);
@@ -7880,6 +7890,234 @@ int CompareStub::MinorKey() {
   return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
 }

+void StringAddStub::Generate(MacroAssembler* masm) {
+  Label string_add_runtime;
+
+  // Load the two arguments.
+  __ movq(rax, Operand(rsp, 2 * kPointerSize));  // First argument.
+  __ movq(rdx, Operand(rsp, 1 * kPointerSize));  // Second argument.
+
+  // Make sure that both arguments are strings if not known in advance.
+  if (string_check_) {
+    Condition is_smi;
+    is_smi = masm->CheckSmi(rax);
+    __ j(is_smi, &string_add_runtime);
+    __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
+    __ j(above_equal, &string_add_runtime);
+
+    // First argument is a a string, test second.
+    is_smi = masm->CheckSmi(rdx);
+    __ j(is_smi, &string_add_runtime);
+    __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
+    __ j(above_equal, &string_add_runtime);
+  }
+
+  // Both arguments are strings.
+  // rax: first string
+  // rdx: second string
+  // Check if either of the strings are empty. In that case return the other.
+  Label second_not_zero_length, both_not_zero_length;
+  __ movl(rcx, FieldOperand(rdx, String::kLengthOffset));
+  __ testl(rcx, rcx);
+  __ j(not_zero, &second_not_zero_length);
+  // Second string is empty, result is first string which is already in rax.
+  __ IncrementCounter(&Counters::string_add_native, 1);
+  __ ret(2 * kPointerSize);
+  __ bind(&second_not_zero_length);
+  __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+  __ testl(rbx, rbx);
+  __ j(not_zero, &both_not_zero_length);
+  // First string is empty, result is second string which is in rdx.
+  __ movq(rax, rdx);
+  __ IncrementCounter(&Counters::string_add_native, 1);
+  __ ret(2 * kPointerSize);
+
+  // Both strings are non-empty.
+  // rax: first string
+  // rbx: length of first string
+  // ecx: length of second string
+  // edx: second string
+  // r8: instance type of first string if string check was performed above
+  // r9: instance type of first string if string check was performed above
+  Label string_add_flat_result;
+  __ bind(&both_not_zero_length);
+  // Look at the length of the result of adding the two strings.
+  __ addl(rbx, rcx);
+  // Use the runtime system when adding two one character strings, as it
+  // contains optimizations for this specific case using the symbol table.
+  __ cmpl(rbx, Immediate(2));
+  __ j(equal, &string_add_runtime);
+  // If arguments where known to be strings, maps are not loaded to r8 and r9
+  // by the code above.
+  if (!string_check_) {
+    __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
+    __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
+  }
+  // Get the instance types of the two strings as they will be needed soon.
+  __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
+  __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
+  // Check if resulting string will be flat.
+  __ cmpl(rbx, Immediate(String::kMinNonFlatLength));
+  __ j(below, &string_add_flat_result);
+  // Handle exceptionally long strings in the runtime system.
+  ASSERT((String::kMaxLength & 0x80000000) == 0);
+  __ cmpl(rbx, Immediate(String::kMaxLength));
+  __ j(above, &string_add_runtime);
+
+  // If result is not supposed to be flat, allocate a cons string object. If
+  // both strings are ascii the result is an ascii cons string.
+  // rax: first string
+  // ebx: length of resulting flat string
+  // rdx: second string
+  // r8: instance type of first string
+  // r9: instance type of second string
+  Label non_ascii, allocated;
+  __ movl(rcx, r8);
+  __ and_(rcx, r9);
+  ASSERT(kStringEncodingMask == kAsciiStringTag);
+  __ testl(rcx, Immediate(kAsciiStringTag));
+  __ j(zero, &non_ascii);
+  // Allocate an acsii cons string.
+  __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
+  __ bind(&allocated);
+  // Fill the fields of the cons string.
+  __ movl(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
+  __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
+          Immediate(String::kEmptyHashField));
+  __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
+  __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
+  __ movq(rax, rcx);
+  __ IncrementCounter(&Counters::string_add_native, 1);
+  __ ret(2 * kPointerSize);
+  __ bind(&non_ascii);
+  // Allocate a two byte cons string.
+  __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
+  __ jmp(&allocated);
+
+  // Handle creating a flat result. First check that both strings are not
+  // external strings.
+  // rax: first string
+  // ebx: length of resulting flat string
+  // rdx: second string
+  // r8: instance type of first string
+  // r9: instance type of first string
+  __ bind(&string_add_flat_result);
+  __ movl(rcx, r8);
+  __ and_(rcx, Immediate(kStringRepresentationMask));
+  __ cmpl(rcx, Immediate(kExternalStringTag));
+  __ j(equal, &string_add_runtime);
+  __ movl(rcx, r9);
+  __ and_(rcx, Immediate(kStringRepresentationMask));
+  __ cmpl(rcx, Immediate(kExternalStringTag));
+  __ j(equal, &string_add_runtime);
+  // Now check if both strings are ascii strings.
+  // rax: first string
+  // ebx: length of resulting flat string
+  // rdx: second string
+  // r8: instance type of first string
+  // r9: instance type of second string
+  Label non_ascii_string_add_flat_result;
+  ASSERT(kStringEncodingMask == kAsciiStringTag);
+  __ testl(r8, Immediate(kAsciiStringTag));
+  __ j(zero, &non_ascii_string_add_flat_result);
+  __ testl(r9, Immediate(kAsciiStringTag));
+  __ j(zero, &string_add_runtime);
+  // Both strings are ascii strings. As they are short they are both flat.
+  __ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+  // rcx: result string
+  __ movq(rbx, rcx);
+  // Locate first character of result.
+  __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument
+  __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
+  __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // rax: first char of first argument
+  // rbx: result string
+  // rcx: first character of result
+  // rdx: second string
+  // rdi: length of first argument
+  GenerateCopyCharacters(masm, rcx, rax, rdi, true);
+  // Locate first character of second argument.
+  __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
+  __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // rbx: result string
+  // rcx: next character of result
+  // rdx: first char of second argument
+  // rdi: length of second argument
+  GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
+  __ movq(rax, rbx);
+  __ IncrementCounter(&Counters::string_add_native, 1);
+  __ ret(2 * kPointerSize);
+
+  // Handle creating a flat two byte result.
+  // rax: first string - known to be two byte
+  // rbx: length of resulting flat string
+  // rdx: second string
+  // r8: instance type of first string
+  // r9: instance type of first string
+  __ bind(&non_ascii_string_add_flat_result);
+  __ and_(r9, Immediate(kAsciiStringTag));
+  __ j(not_zero, &string_add_runtime);
+  // Both strings are two byte strings. As they are short they are both
+  // flat.
+  __ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+  // rcx: result string
+  __ movq(rbx, rcx);
+  // Locate first character of result.
+  __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
+  __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // rax: first char of first argument
+  // rbx: result string
+  // rcx: first character of result
+  // rdx: second argument
+  // rdi: length of first argument
+  GenerateCopyCharacters(masm, rcx, rax, rdi, false);
+  // Locate first character of second argument.
+  __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
+  __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // rbx: result string
+  // rcx: next character of result
+  // rdx: first char of second argument
+  // rdi: length of second argument
+  GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
+  __ movq(rax, rbx);
+  __ IncrementCounter(&Counters::string_add_native, 1);
+  __ ret(2 * kPointerSize);
+
+  // Just jump to runtime to add the two strings.
+  __ bind(&string_add_runtime);
+  __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+}
+
+void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
+                                           Register dest,
+                                           Register src,
+                                           Register count,
+                                           bool ascii) {
+  Label loop;
+  __ bind(&loop);
+  // This loop just copies one character at a time, as it is only used for very
+  // short strings.
+  if (ascii) {
+    __ movb(kScratchRegister, Operand(src, 0));
+    __ movb(Operand(dest, 0), kScratchRegister);
+    __ addq(src, Immediate(1));
+    __ addq(dest, Immediate(1));
+  } else {
+    __ movzxwl(kScratchRegister, Operand(src, 0));
+    __ movw(Operand(dest, 0), kScratchRegister);
+    __ addq(src, Immediate(2));
+    __ addq(dest, Immediate(2));
+  }
+  __ subl(count, Immediate(1));
+  __ j(not_zero, &loop);
+}
+
 #undef __
 #define __ masm.
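Note: the stub's control flow is easier to see stripped of register bookkeeping: return the other operand when one side is empty, defer two-character strings (symbol table) and overlong results to the runtime, build a cons string for long results, and flat-copy short ones. A C++ outline of that decision ladder (stand-in types and thresholds marked below; not the stub itself):

struct String { int length; bool flat_ok; };

// Trivial stand-ins so the sketch compiles; V8 does real allocation here.
String* NewString(int length, bool flat_ok) { return new String{length, flat_ok}; }
String* RuntimeAdd(String* a, String* b) { return NewString(a->length + b->length, true); }
String* MakeConsString(String* a, String* b) { return NewString(a->length + b->length, false); }
String* FlatConcat(String* a, String* b) { return NewString(a->length + b->length, true); }

const int kMinNonFlatLength = 13;      // threshold below which results stay flat
const int kMaxLength = (1 << 30) - 1;  // illustrative cap, not V8's exact value

String* StringAdd(String* a, String* b) {
  if (b->length == 0) return a;              // one side empty:
  if (a->length == 0) return b;              // return the other operand
  int length = a->length + b->length;
  if (length == 2) return RuntimeAdd(a, b);          // symbol-table fast case
  if (length > kMaxLength) return RuntimeAdd(a, b);  // overlong: runtime
  if (length >= kMinNonFlatLength) return MakeConsString(a, b);
  if (!a->flat_ok || !b->flat_ok) return RuntimeAdd(a, b);  // external strings
  return FlatConcat(a, b);                   // short result: copy characters
}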

38
deps/v8/src/x64/codegen-x64.h

@@ -436,7 +436,7 @@ class CodeGenerator: public AstVisitor {
   void GenericBinaryOperation(
       Token::Value op,
-      SmiAnalysis* type,
+      StaticType* type,
       OverwriteMode overwrite_mode);

   // If possible, combine two constant smi values using op to produce

@@ -449,7 +449,7 @@ class CodeGenerator: public AstVisitor {
   void ConstantSmiBinaryOperation(Token::Value op,
                                   Result* operand,
                                   Handle<Object> constant_operand,
-                                  SmiAnalysis* type,
+                                  StaticType* type,
                                   bool reversed,
                                   OverwriteMode overwrite_mode);

@@ -670,7 +670,8 @@ class GenericBinaryOpStub: public CodeStub {
         mode_(mode),
         flags_(flags),
         args_in_registers_(false),
-        args_reversed_(false) {
+        args_reversed_(false),
+        name_(NULL) {
     use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }

@@ -689,6 +690,7 @@ class GenericBinaryOpStub: public CodeStub {
   bool args_in_registers_;  // Arguments passed in registers not on the stack.
   bool args_reversed_;  // Left and right argument are swapped.
   bool use_sse3_;
+  char* name_;

   const char* GetName();

@@ -745,6 +747,36 @@ class GenericBinaryOpStub: public CodeStub {
 };

+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+  NO_STRING_ADD_FLAGS = 0,
+  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
+};
+
+class StringAddStub: public CodeStub {
+ public:
+  explicit StringAddStub(StringAddFlags flags) {
+    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+  }
+
+ private:
+  Major MajorKey() { return StringAdd; }
+  int MinorKey() { return string_check_ ? 0 : 1; }
+
+  void Generate(MacroAssembler* masm);
+
+  void GenerateCopyCharacters(MacroAssembler* masm,
+                              Register desc,
+                              Register src,
+                              Register count,
+                              bool ascii);
+
+  // Should the stub check whether arguments are strings?
+  bool string_check_;
+};
+
 } }  // namespace v8::internal

 #endif  // V8_X64_CODEGEN_X64_H_

354
deps/v8/src/x64/fast-codegen-x64.cc

@@ -420,73 +420,97 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
   Variable* var = decl->proxy()->var();
   ASSERT(var != NULL);  // Must have been resolved.
   Slot* slot = var->slot();
-  ASSERT(slot != NULL);  // No global declarations here.
-
-  // We have 3 cases for slots: LOOKUP, LOCAL, CONTEXT.
-  switch (slot->type()) {
-    case Slot::LOOKUP: {
-      __ push(rsi);
-      __ Push(var->name());
-      // Declaration nodes are always introduced in one of two modes.
-      ASSERT(decl->mode() == Variable::VAR || decl->mode() == Variable::CONST);
-      PropertyAttributes attr = decl->mode() == Variable::VAR ?
-          NONE : READ_ONLY;
-      __ Push(Smi::FromInt(attr));
-      // Push initial value, if any.
-      // Note: For variables we must not push an initial value (such as
-      // 'undefined') because we may have a (legal) redeclaration and we
-      // must not destroy the current value.
-      if (decl->mode() == Variable::CONST) {
-        __ Push(Factory::the_hole_value());
-      } else if (decl->fun() != NULL) {
-        Visit(decl->fun());
-      } else {
-        __ Push(Smi::FromInt(0));  // no initial value!
-      }
-      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
-      break;
-    }
-    case Slot::LOCAL:
-      if (decl->mode() == Variable::CONST) {
-        __ Move(Operand(rbp, SlotOffset(var->slot())),
-                Factory::the_hole_value());
-      } else if (decl->fun() != NULL) {
-        Visit(decl->fun());
-        __ pop(Operand(rbp, SlotOffset(var->slot())));
-      }
-      break;
-    case Slot::CONTEXT:
-      // The variable in the decl always resides in the current context.
-      ASSERT(function_->scope()->ContextChainLength(slot->var()->scope()) == 0);
-      if (decl->mode() == Variable::CONST) {
-        __ Move(rax, Factory::the_hole_value());
-        if (FLAG_debug_code) {
-          // Check if we have the correct context pointer.
-          __ movq(rbx, CodeGenerator::ContextOperand(rsi,
-                                                     Context::FCONTEXT_INDEX));
-          __ cmpq(rbx, rsi);
-          __ Check(equal, "Unexpected declaration in current context.");
-        }
-        __ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax);
-        // No write barrier since the_hole_value is in old space.
-        ASSERT(!Heap::InNewSpace(*Factory::the_hole_value()));
-      } else if (decl->fun() != NULL) {
-        Visit(decl->fun());
-        __ pop(rax);
-        if (FLAG_debug_code) {
-          // Check if we have the correct context pointer.
-          __ movq(rbx, CodeGenerator::ContextOperand(rsi,
-                                                     Context::FCONTEXT_INDEX));
-          __ cmpq(rbx, rsi);
-          __ Check(equal, "Unexpected declaration in current context.");
-        }
-        __ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax);
-        int offset = Context::SlotOffset(slot->index());
-        __ RecordWrite(rsi, offset, rax, rcx);
-      }
-      break;
-    default:
-      UNREACHABLE();
+  Property* prop = var->AsProperty();
+
+  if (slot != NULL) {
+    switch (slot->type()) {
+      case Slot::PARAMETER:  // Fall through.
+      case Slot::LOCAL:
+        if (decl->mode() == Variable::CONST) {
+          __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+          __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
+        } else if (decl->fun() != NULL) {
+          Visit(decl->fun());
+          __ pop(Operand(rbp, SlotOffset(var->slot())));
+        }
+        break;
+
+      case Slot::CONTEXT:
+        // The variable in the decl always resides in the current context.
+        ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+        if (FLAG_debug_code) {
+          // Check if we have the correct context pointer.
+          __ movq(rbx,
+                  CodeGenerator::ContextOperand(rsi, Context::FCONTEXT_INDEX));
+          __ cmpq(rbx, rsi);
+          __ Check(equal, "Unexpected declaration in current context.");
+        }
+        if (decl->mode() == Variable::CONST) {
+          __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+          __ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
+                  kScratchRegister);
+          // No write barrier since the hole value is in old space.
+        } else if (decl->fun() != NULL) {
+          Visit(decl->fun());
+          __ pop(rax);
+          __ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax);
+          int offset = Context::SlotOffset(slot->index());
+          __ RecordWrite(rsi, offset, rax, rcx);
+        }
+        break;
+
+      case Slot::LOOKUP: {
+        __ push(rsi);
+        __ Push(var->name());
+        // Declaration nodes are always introduced in one of two modes.
+        ASSERT(decl->mode() == Variable::VAR ||
+               decl->mode() == Variable::CONST);
+        PropertyAttributes attr =
+            (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+        __ Push(Smi::FromInt(attr));
+        // Push initial value, if any.
+        // Note: For variables we must not push an initial value (such as
+        // 'undefined') because we may have a (legal) redeclaration and we
+        // must not destroy the current value.
+        if (decl->mode() == Variable::CONST) {
+          __ PushRoot(Heap::kTheHoleValueRootIndex);
+        } else if (decl->fun() != NULL) {
+          Visit(decl->fun());
+        } else {
+          __ Push(Smi::FromInt(0));  // no initial value!
+        }
+        __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+        break;
+      }
+    }
+  } else if (prop != NULL) {
+    if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+      // We are declaring a function or constant that rewrites to a
+      // property. Use (keyed) IC to set the initial value.
+      ASSERT_EQ(Expression::kValue, prop->obj()->context());
+      Visit(prop->obj());
+      ASSERT_EQ(Expression::kValue, prop->key()->context());
+      Visit(prop->key());
+      if (decl->fun() != NULL) {
+        ASSERT_EQ(Expression::kValue, decl->fun()->context());
+        Visit(decl->fun());
+        __ pop(rax);
+      } else {
+        __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
+      }
+
+      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+      __ call(ic, RelocInfo::CODE_TARGET);
+      // Absence of a test rax instruction following the call
+      // indicates that none of the load was inlined.
+      // Value in rax is ignored (declarations are statements). Receiver
+      // and key on stack are discarded.
+      __ addq(rsp, Immediate(2 * kPointerSize));
+    }
   }
 }
@@ -501,20 +525,6 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
 }

-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
-  Comment cmnt(masm_, "[ ReturnStatement");
-  Expression* expr = stmt->expression();
-  if (expr->AsLiteral() != NULL) {
-    __ Move(rax, expr->AsLiteral()->handle());
-  } else {
-    Visit(expr);
-    ASSERT_EQ(Expression::kValue, expr->context());
-    __ pop(rax);
-  }
-  EmitReturnSequence(stmt->statement_pos());
-}
-
 void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   Comment cmnt(masm_, "[ FunctionLiteral");
@@ -535,14 +545,20 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {

 void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
-  Expression* rewrite = expr->var()->rewrite();
+  EmitVariableLoad(expr->var(), expr->context());
+}
+
+void FastCodeGenerator::EmitVariableLoad(Variable* var,
+                                         Expression::Context context) {
+  Expression* rewrite = var->rewrite();
   if (rewrite == NULL) {
-    ASSERT(expr->var()->is_global());
+    ASSERT(var->is_global());
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in rcx and the global
     // object on the stack.
     __ push(CodeGenerator::GlobalObject());
-    __ Move(rcx, expr->name());
+    __ Move(rcx, var->name());
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
     // A test rax instruction following the call is used by the IC to

@@ -550,7 +566,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
     // is no test rax instruction here.
     __ nop();

-    DropAndMove(expr->context(), rax);
+    DropAndMove(context, rax);
   } else if (rewrite->AsSlot() != NULL) {
     Slot* slot = rewrite->AsSlot();
     if (FLAG_debug_code) {

@@ -571,7 +587,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
           UNREACHABLE();
       }
     }
-    Move(expr->context(), slot, rax);
+    Move(context, slot, rax);
   } else {
     // A variable has been rewritten into an explicit access to
     // an object property.

@@ -605,7 +621,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
     // the call. It is treated specially by the LoadIC code.

     // Drop key and object left on the stack by IC, and push the result.
-    DropAndMove(expr->context(), rax, 2);
+    DropAndMove(context, rax, 2);
   }
 }
@@ -639,31 +655,14 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Label boilerplate_exists;
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movq(rbx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
__ movq(rax, FieldOperand(rbx, literal_offset));
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &boilerplate_exists);
// Create boilerplate if it does not exist.
// Literal array (0).
__ push(rbx);
// Literal index (1).
__ Push(Smi::FromInt(expr->literal_index()));
// Constant properties (2).
__ Push(expr->constant_properties());
__ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
__ bind(&boilerplate_exists);
// rax contains boilerplate.
// Clone boilerplate.
__ push(rax);
if (expr->depth() == 1) {
__ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
} else {
__ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
}
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->constant_properties());
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 3);
} else {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
}
// If result_saved == true: The result is saved on top of the
@@ -759,31 +758,14 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
Label make_clone;
// Fetch the function's literals array.
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movq(rbx, FieldOperand(rbx, JSFunction::kLiteralsOffset));
// Check if the literal's boilerplate has been instantiated.
int offset =
FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
__ movq(rax, FieldOperand(rbx, offset));
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &make_clone);
// Instantiate the boilerplate.
__ push(rbx);
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->literals());
__ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
__ bind(&make_clone);
// Clone the boilerplate.
__ push(rax);
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
} else {
__ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
}
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->literals());
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
}
bool result_saved = false;  // Is the result saved to the stack?
@@ -853,10 +835,37 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
Expression::Context context) {
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
Move(context, rax);
}
void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
Move(context, rax);
}
void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
Expression::Context context) {
GenericBinaryOpStub stub(op,
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
__ CallStub(&stub);
Move(context, rax);
}
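These helpers factor the property-load ICs and the generic binary-op stub out of the assignment visitors. As a hypothetical sketch of how they are meant to be sequenced for a compound named assignment such as "obj.key += val" (the driver below is illustrative only; the real visitor lives in the platform-independent fast-codegen.cc, and EmitNamedPropertyAssignment is an assumed name for the store step):

// Sketch: expected helper sequencing for "obj.key += val"
// (hypothetical driver, not part of this patch).
void SketchVisitCompoundNamedAssignment(Assignment* expr) {
  Property* prop = expr->target()->AsProperty();
  Visit(prop->obj());                               // receiver onto stack
  EmitNamedPropertyLoad(prop, Expression::kValue);  // old value via LoadIC
  Visit(expr->value());                             // right-hand side
  EmitCompoundAssignmentOp(expr->binary_op(),       // e.g. Token::ADD,
                           Expression::kValue);     // via GenericBinaryOpStub
  EmitNamedPropertyAssignment(expr);                // store the result
}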
void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
if (var->is_global()) {
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in rax, variable name in
@@ -961,36 +970,6 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
UNREACHABLE();
break;
}
} else {
Property* property = var->AsProperty();
ASSERT_NOT_NULL(property);
// A variable has been rewritten into a property on an object.
// Load object and key onto the stack.
Slot* object_slot = property->obj()->AsSlot();
ASSERT_NOT_NULL(object_slot);
Move(Expression::kValue, object_slot, rax);
Literal* key_literal = property->key()->AsLiteral();
ASSERT_NOT_NULL(key_literal);
Move(Expression::kValue, key_literal);
// Value to store was pushed before object and key on the stack.
__ movq(rax, Operand(rsp, 2 * kPointerSize));
// Arguments to ic is value in rax, object and key on stack.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
if (expr->context() == Expression::kEffect) {
__ addq(rsp, Immediate(3 * kPointerSize));
} else if (expr->context() == Expression::kValue) {
// Value is still on the stack in rsp[2 * kPointerSize]
__ addq(rsp, Immediate(2 * kPointerSize));
} else {
__ movq(rax, Operand(rsp, 2 * kPointerSize));
DropAndMove(expr->context(), rax, 3);
}
}
}
@@ -1097,7 +1076,9 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
}
void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
void FastCodeGenerator::EmitCallWithIC(Call* expr,
Handle<Object> ignored,
RelocInfo::Mode mode) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -1110,7 +1091,7 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
// Call the IC initialization code.
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
__ call(ic, reloc_info);
__ call(ic, mode);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -1149,7 +1130,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
__ Push(var->name());
// Push global object as receiver for the call IC lookup.
__ push(CodeGenerator::GlobalObject());
EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// Call to a lookup slot.
@@ -1162,7 +1143,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
// Call to a named property, use call IC.
__ Push(key->handle());
Visit(prop->obj());
EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
@@ -1684,6 +1665,69 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
Move(expr->context(), rax);
}
Register FastCodeGenerator::result_register() { return rax; }
Register FastCodeGenerator::context_register() { return rsi; }
void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset),
static_cast<intptr_t>(frame_offset));
__ movq(Operand(rbp, frame_offset), value);
}
void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
__ movq(dst, CodeGenerator::ContextOperand(rsi, context_index));
}
// ----------------------------------------------------------------------------
// Non-local control flow support.
void FastCodeGenerator::EnterFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Cook return address on top of stack (smi encoded Code* delta)
__ movq(rdx, Operand(rsp, 0));
__ Move(rcx, masm_->CodeObject());
__ subq(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
__ movq(Operand(rsp, 0), rdx);
// Store result register while executing finally block.
__ push(result_register());
}
void FastCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Restore result register from stack.
__ pop(result_register());
// Uncook return address.
__ movq(rdx, Operand(rsp, 0));
__ SmiToInteger32(rdx, rdx);
__ Move(rcx, masm_->CodeObject());
__ addq(rdx, rcx);
__ movq(Operand(rsp, 0), rdx);
// And return.
__ ret(0);
}
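Cooking replaces the absolute return address with a smi-encoded offset from the owning Code object, so a moving GC during the finally block cannot strand a stale code pointer on the stack. On x64, where a smi is the 32-bit payload shifted into the upper word, the round trip is roughly the following (a sketch of the arithmetic only, not V8 code):

// Cook: store a relocatable, smi-tagged delta instead of a raw address.
int64_t CookReturnAddress(int64_t return_address, int64_t code_object) {
  return (return_address - code_object) << 32;  // subq + Integer32ToSmi
}

// Uncook: rebuild the absolute address against the (possibly moved) code.
int64_t UncookReturnAddress(int64_t cooked, int64_t code_object) {
  return (cooked >> 32) + code_object;          // SmiToInteger32 + addq
}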
void FastCodeGenerator::ThrowException() {
__ push(result_register());
__ CallRuntime(Runtime::kThrow, 1);
}
#undef __

103
deps/v8/src/x64/ic-x64.cc

@@ -48,9 +48,13 @@ namespace internal {
// must always call a backup property load that is complete.
// This function is safe to call if the receiver has fast properties,
// or if name is not a symbol, and will jump to the miss_label in that case.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
Register r0, Register r1, Register r2,
Register name) {
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss_label,
Register r0,
Register r1,
Register r2,
Register name,
DictionaryCheck check_dictionary) {
// Register use:
//
// r0 - used to hold the property dictionary.
@@ -86,10 +90,14 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
__ cmpb(r0, Immediate(JS_BUILTINS_OBJECT_TYPE));
__ j(equal, miss_label);
// Check that the properties array is a dictionary.
// Load properties array.
__ movq(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
__ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
__ j(not_equal, miss_label);
if (check_dictionary == CHECK_DICTIONARY) {
// Check that the properties array is a dictionary.
__ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
__ j(not_equal, miss_label);
}
// Compute the capacity mask.
const int kCapacityOffset =
@@ -246,7 +254,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
Label slow, check_string, index_int, index_string, check_pixel_array;
Label slow, check_string, index_int, index_string;
Label check_pixel_array, probe_dictionary;
// Load name and receiver.
__ movq(rax, Operand(rsp, kPointerSize));
@@ -319,14 +328,68 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ movl(rbx, FieldOperand(rax, String::kHashFieldOffset));
__ testl(rbx, Immediate(String::kIsArrayIndexMask));
// If the string is a symbol, do a quick inline probe of the receiver's
// dictionary, if it exists.
// Is the string a symbol?
__ j(not_zero, &index_string);  // The value in rbx is used at jump target.
__ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
Immediate(kIsSymbolMask));
__ j(zero, &slow);
// Probe the dictionary leaving result in rcx.
GenerateDictionaryLoad(masm, &slow, rbx, rcx, rdx, rax);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary leaving result in rcx.
__ movq(rbx, FieldOperand(rcx, JSObject::kPropertiesOffset));
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), Factory::hash_table_map());
__ j(equal, &probe_dictionary);
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the string hash.
__ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
__ movl(rdx, rbx);
__ shr(rdx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
__ shr(rax, Immediate(String::kHashShift));
__ xor_(rdx, rax);
__ and_(rdx, Immediate(KeyedLookupCache::kCapacityMask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
ExternalReference cache_keys
= ExternalReference::keyed_lookup_cache_keys();
__ movq(rdi, rdx);
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ movq(kScratchRegister, cache_keys);
__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
__ j(not_equal, &slow);
__ movq(rdi, Operand(kScratchRegister, rdi, times_1, kPointerSize));
__ cmpq(Operand(rsp, kPointerSize), rdi);
__ j(not_equal, &slow);
// Get field offset which is a 32-bit integer and check that it is
// an in-object property.
ExternalReference cache_field_offsets
= ExternalReference::keyed_lookup_cache_field_offsets();
__ movq(kScratchRegister, cache_field_offsets);
__ movl(rax, Operand(kScratchRegister, rdx, times_4, 0));
__ movzxbq(rdx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
__ cmpq(rax, rdx);
__ j(above_equal, &slow);
// Load in-object property.
__ subq(rax, rdx);
__ movzxbq(rdx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ addq(rax, rdx);
__ movq(rax, FieldOperand(rcx, rax, times_pointer_size, 0));
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
// exists.
__ bind(&probe_dictionary);
GenerateDictionaryLoad(masm,
&slow,
rbx,
rcx,
rdx,
rax,
DICTIONARY_CHECK_DONE);
GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
__ movq(rax, rcx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
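The fast path above indexes KeyedLookupCache by mixing the low 32 bits of the receiver's map pointer with the symbol's hash field. In C++ terms the index computation is roughly the sketch below (constants from KeyedLookupCache and String; the hash_field() accessor is assumed):

// Sketch of the index computed in rdx by the assembly above.
static int KeyedLookupIndex(Map* map, String* name) {
  uint32_t map_bits =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
  uint32_t hash = name->hash_field() >> String::kHashShift;
  return ((map_bits >> KeyedLookupCache::kMapHashShift) ^ hash) &
         KeyedLookupCache::kCapacityMask;
}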
@@ -853,9 +916,7 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
}
void CallIC::Generate(MacroAssembler* masm,
int argc,
ExternalReference const& f) {
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Get the name of the function to call from the stack.
@@ -872,7 +933,7 @@ void CallIC::Generate(MacroAssembler* masm,
// Call the entry.
CEntryStub stub(1);
__ movq(rax, Immediate(2));
__ movq(rbx, f);
__ movq(rbx, ExternalReference(IC_Utility(kCallIC_Miss)));
__ CallStub(&stub);
// Move result to rdi and exit the internal frame.
@@ -963,7 +1024,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
GenerateMiss(masm, argc);
}
@@ -971,8 +1032,8 @@ static void GenerateNormalHelper(MacroAssembler* masm,
int argc,
bool is_global_object,
Label* miss) {
// Search dictionary - put result in register edx.
// Search dictionary - put result in register rdx.
GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx);
GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, CHECK_DICTIONARY);
// Move the result to register rdi and check that it isn't a smi.
__ movq(rdi, rdx);
@@ -1065,7 +1126,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// Cache miss: Jump to runtime.
__ bind(&miss);
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
GenerateMiss(masm, argc);
}
@@ -1196,9 +1257,9 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &miss);
// Search the dictionary placing the result in eax.
// Search the dictionary placing the result in rax.
__ bind(&probe);
GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx);
GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx, CHECK_DICTIONARY);
GenerateCheckNonObjectOrLoaded(masm, &miss, rax);
__ ret(0);

125
deps/v8/src/x64/macro-assembler-x64.cc

@@ -310,6 +310,12 @@ void MacroAssembler::CallStub(CodeStub* stub) {
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
@@ -1339,6 +1345,13 @@ void MacroAssembler::Push(Smi* source) {
}
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
addq(rsp, Immediate(stack_elements * kPointerSize));
}
}
void MacroAssembler::Test(const Operand& src, Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
@@ -1425,6 +1438,16 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
}
void MacroAssembler::PopTryHandler() {
ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
// Unlink this handler.
movq(kScratchRegister, ExternalReference(Top::k_handler_address));
pop(Operand(kScratchRegister, 0));
// Remove the remaining fields.
addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
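PopTryHandler is the inverse of PushTryHandler: the word on top of the stack is the saved next-handler link (the ASSERT pins it at offset 0), so popping it into Top::k_handler_address relinks the chain before the rest of the record is dropped. In C-like pseudocode, under that layout assumption:

// Sketch of the unlink above; Address is V8's byte-pointer typedef.
void PopTryHandlerSketch(Address* handler_address, Address* stack_pointer) {
  *handler_address = *reinterpret_cast<Address*>(*stack_pointer);  // relink
  *stack_pointer += StackHandlerConstants::kSize;  // drop the whole record
}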
void MacroAssembler::Ret() {
ret(0);
}
@@ -2244,6 +2267,108 @@ void MacroAssembler::AllocateHeapNumber(Register result,
}
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate two byte string in new space.
AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
times_1,
scratch1,
result,
scratch2,
scratch3,
gc_required,
TAG_OBJECT);
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
movl(FieldOperand(result, String::kLengthOffset), length);
movl(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
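Only the character payload is rounded up here; the ASSERT guarantees the header is already object-aligned, so the two can simply be added. The requested size, in plain C++ (a sketch of the lea/and sequence above, not a V8 helper):

// Bytes requested from new space for a two-byte string of 'length'
// characters: header plus payload rounded up to object alignment.
static int TwoByteStringSize(int length) {
  int payload = (length * kShortSize + kObjectAlignmentMask) &
                ~kObjectAlignmentMask;
  return SeqTwoByteString::kHeaderSize + payload;
}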
void MacroAssembler::AllocateAsciiString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
movl(scratch1, length);
ASSERT(kCharSize == 1);
addq(scratch1, Immediate(kObjectAlignmentMask));
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate ascii string in new space.
AllocateInNewSpace(SeqAsciiString::kHeaderSize,
times_1,
scratch1,
result,
scratch2,
scratch3,
gc_required,
TAG_OBJECT);
// Set the map, length and hash field.
LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
movl(FieldOperand(result, String::kLengthOffset), length);
movl(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
void MacroAssembler::AllocateConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
// Allocate cons string in new space.
AllocateInNewSpace(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
// Allocate ascii cons string in new space.
AllocateInNewSpace(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.

35
deps/v8/src/x64/macro-assembler-x64.h

@@ -400,7 +400,7 @@ class MacroAssembler: public Assembler {
void Test(const Operand& dst, Smi* source);
// ---------------------------------------------------------------------------
// Macro instructions.
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
@@ -412,6 +412,8 @@ class MacroAssembler: public Assembler {
void Cmp(Register dst, Handle<Object> source);
void Cmp(const Operand& dst, Handle<Object> source);
void Push(Handle<Object> source);
void Drop(int stack_elements);
void Call(Label* target) { call(target); }
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
@@ -443,6 +445,8 @@ class MacroAssembler: public Assembler {
// address must be pushed before calling this helper.
void PushTryHandler(CodeLocation try_location, HandlerType type);
// Unlink the stack handler on top of the stack from the try handler chain.
void PopTryHandler();
// ---------------------------------------------------------------------------
// Inline caching support
@@ -518,6 +522,32 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* gc_required);
// Allocate a sequential string. All the header fields of the string object
// are initialized.
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required);
void AllocateAsciiString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required);
// Allocate a raw cons string object. Only the map field of the result is
// initialized.
void AllocateConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required);
// ---------------------------------------------------------------------------
// Support functions.
@@ -557,6 +587,9 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub);
// Tail call a code stub (jump).
void TailCallStub(CodeStub* stub);
// Return from a code stub after popping its arguments.
void StubReturn(int argc);

47
deps/v8/src/x64/stub-cache-x64.cc

@@ -956,8 +956,24 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
__ Cmp(rdi, Handle<JSFunction>(function));
__ j(not_equal, &miss);
if (Heap::InNewSpace(function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
__ JumpIfSmi(rdi, &miss);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &miss);
// Check the shared function info. Make sure it hasn't changed.
__ Move(rcx, Handle<SharedFunctionInfo>(function->shared()));
__ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rcx);
__ j(not_equal, &miss);
} else {
__ Cmp(rdi, Handle<JSFunction>(function));
__ j(not_equal, &miss);
}
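The new-space branch compares the (tenured) SharedFunctionInfo instead of the closure itself, because a pointer into new space embedded in generated code would go stale when the scavenger moves the function. The decision, sketched in C++ (hypothetical shape; names follow the snippet above):

// Sketch: what the stub effectively compares at call time.
bool CandidateMatches(Object* candidate, JSFunction* function) {
  if (Heap::InNewSpace(function)) {
    // Movable closure: match on the shared function info, which also
    // lets sibling closures of the same function share this call IC.
    return candidate->IsJSFunction() &&
           JSFunction::cast(candidate)->shared() == function->shared();
  }
  return candidate == function;  // tenured: safe to embed the pointer
}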
// Patch the receiver on the stack with the global proxy.
if (object->IsGlobalObject()) {
@@ -987,10 +1003,10 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
}
Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
JSObject* holder,
AccessorInfo* callback,
String* name) {
Object* LoadStubCompiler::CompileLoadCallback(String* name,
JSObject* object,
JSObject* holder,
AccessorInfo* callback) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
@@ -999,8 +1015,11 @@ Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
callback, name, &miss);
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
callback, name, &miss, &failure);
if (!success) return failure;
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1154,8 +1173,11 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
callback, name, &miss);
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
callback, name, &miss, &failure);
if (!success) return failure;
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_callback, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1610,7 +1632,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
}
void StubCompiler::GenerateLoadCallback(JSObject* object,
bool StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@@ -1618,7 +1640,8 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
Register scratch2,
AccessorInfo* callback,
String* name,
Label* miss) {
Label* miss,
Failure** failure) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
@@ -1641,6 +1664,8 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(load_callback_property, 5, 1);
return true;
}

34
deps/v8/test/cctest/test-api.cc

@@ -447,6 +447,40 @@ THREADED_TEST(UsingExternalAsciiString) {
}
THREADED_TEST(ScavengeExternalString) {
TestResource::dispose_count = 0;
{
v8::HandleScope scope;
uint16_t* two_byte_string = AsciiToTwoByteString("test string");
Local<String> string =
String::NewExternal(new TestResource(two_byte_string));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
i::Heap::CollectGarbage(0, i::NEW_SPACE);
CHECK(i::Heap::InNewSpace(*istring));
CHECK_EQ(0, TestResource::dispose_count);
}
i::Heap::CollectGarbage(0, i::NEW_SPACE);
CHECK_EQ(1, TestResource::dispose_count);
}
THREADED_TEST(ScavengeExternalAsciiString) {
TestAsciiResource::dispose_count = 0;
{
v8::HandleScope scope;
const char* one_byte_string = "test string";
Local<String> string = String::NewExternal(
new TestAsciiResource(i::StrDup(one_byte_string)));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
i::Heap::CollectGarbage(0, i::NEW_SPACE);
CHECK(i::Heap::InNewSpace(*istring));
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
i::Heap::CollectGarbage(0, i::NEW_SPACE);
CHECK_EQ(1, TestAsciiResource::dispose_count);
}
THREADED_TEST(StringConcat) {
{
v8::HandleScope scope;

33
deps/v8/test/cctest/test-debug.cc

@@ -3141,6 +3141,39 @@ TEST(DisableBreak) {
CheckDebuggerUnloaded();
}
static const char* kSimpleExtensionSource =
"(function Foo() {"
" return 4;"
"})() ";
// http://crbug.com/28933
// Test that debug break is disabled when bootstrapper is active.
TEST(NoBreakWhenBootstrapping) {
v8::HandleScope scope;
// Register a debug event listener which sets the break flag and counts.
v8::Debug::SetDebugEventListener(DebugEventCounter);
// Set the debug break flag.
v8::Debug::DebugBreak();
break_point_hit_count = 0;
{
// Create a context with an extension to make sure that some JavaScript
// code is executed during bootstrapping.
v8::RegisterExtension(new v8::Extension("simpletest",
kSimpleExtensionSource));
const char* extension_names[] = { "simpletest" };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
context.Dispose();
}
// Check that no DebugBreak events occurred during the context creation.
CHECK_EQ(0, break_point_hit_count);
// Get rid of the debug event listener.
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
}
static v8::Handle<v8::Array> NamedEnum(const v8::AccessorInfo&) {
v8::Handle<v8::Array> result = v8::Array::New(3);

12
deps/v8/test/cctest/test-macro-assembler-x64.cc

@@ -91,14 +91,14 @@ typedef int (*F0)();
TEST(Smi) {
// Check that C++ Smi operations work as expected.
intptr_t test_numbers[] = {
int64_t test_numbers[] = {
0, 1, -1, 127, 128, -128, -129, 255, 256, -256, -257,
Smi::kMaxValue, static_cast<intptr_t>(Smi::kMaxValue) + 1,
Smi::kMinValue, static_cast<intptr_t>(Smi::kMinValue) - 1
Smi::kMaxValue, static_cast<int64_t>(Smi::kMaxValue) + 1,
Smi::kMinValue, static_cast<int64_t>(Smi::kMinValue) - 1
};
int test_number_count = 15;
for (int i = 0; i < test_number_count; i++) {
intptr_t number = test_numbers[i];
int64_t number = test_numbers[i];
bool is_valid = Smi::IsValid(number);
bool is_in_range = number >= Smi::kMinValue && number <= Smi::kMaxValue;
CHECK_EQ(is_in_range, is_valid);
@@ -108,8 +108,8 @@ TEST(Smi) {
Smi* smi_from_int = Smi::FromInt(static_cast<int32_t>(number));
CHECK_EQ(smi_from_int, smi_from_intptr);
}
int smi_value = smi_from_intptr->value();
CHECK_EQ(number, static_cast<intptr_t>(smi_value));
int64_t smi_value = smi_from_intptr->value();
CHECK_EQ(number, smi_value);
}
}
}

35
deps/v8/test/mjsunit/compiler/thisfunction.js

@@ -0,0 +1,35 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --always_fast_compiler
// Test reference to this-function.
var g = (function f(x) {
if (x == 1) return 42; else return f(1);
})(0);
assertEquals(42, g);

1
deps/v8/test/mjsunit/fuzz-natives.js

@@ -129,7 +129,6 @@ var knownProblems = {
"Log": true,
"DeclareGlobals": true,
"CollectStackTrace": true,
"PromoteScheduledException": true,
"DeleteHandleScopeExtensions": true
};

