Browse Source

Merge branch 'master' into net2

v0.7.4-release
Ryan Dahl 15 years ago
parent
commit
7eb126474d
  1. 2
      bin/node-waf
  2. 19
      deps/v8/ChangeLog
  3. 40
      deps/v8/SConstruct
  4. 30
      deps/v8/include/v8.h
  5. 2
      deps/v8/src/SConscript
  6. 1
      deps/v8/src/accessors.cc
  7. 62
      deps/v8/src/api.cc
  8. 94
      deps/v8/src/arm/assembler-arm.cc
  9. 172
      deps/v8/src/arm/assembler-arm.h
  10. 4
      deps/v8/src/arm/builtins-arm.cc
  11. 347
      deps/v8/src/arm/codegen-arm.cc
  12. 43
      deps/v8/src/arm/codegen-arm.h
  13. 4
      deps/v8/src/arm/fast-codegen-arm.cc
  14. 1
      deps/v8/src/arm/full-codegen-arm.cc
  15. 21
      deps/v8/src/arm/ic-arm.cc
  16. 64
      deps/v8/src/arm/macro-assembler-arm.cc
  17. 28
      deps/v8/src/arm/macro-assembler-arm.h
  18. 4
      deps/v8/src/arm/regexp-macro-assembler-arm.cc
  19. 12
      deps/v8/src/arm/stub-cache-arm.cc
  20. 21
      deps/v8/src/arm/virtual-frame-arm.cc
  21. 6
      deps/v8/src/arm/virtual-frame-arm.h
  22. 2
      deps/v8/src/array.js
  23. 10
      deps/v8/src/assembler.cc
  24. 3
      deps/v8/src/assembler.h
  25. 5
      deps/v8/src/ast.cc
  26. 61
      deps/v8/src/ast.h
  27. 13
      deps/v8/src/bootstrapper.cc
  28. 376
      deps/v8/src/builtins.cc
  29. 15
      deps/v8/src/code-stubs.cc
  30. 11
      deps/v8/src/code-stubs.h
  31. 5
      deps/v8/src/codegen.cc
  32. 70
      deps/v8/src/compilation-cache.cc
  33. 38
      deps/v8/src/compiler.cc
  34. 7
      deps/v8/src/compiler.h
  35. 2
      deps/v8/src/contexts.h
  36. 26
      deps/v8/src/conversions-inl.h
  37. 3
      deps/v8/src/conversions.h
  38. 906
      deps/v8/src/data-flow.cc
  39. 391
      deps/v8/src/data-flow.h
  40. 29
      deps/v8/src/date-delay.js
  41. 50
      deps/v8/src/debug-delay.js
  42. 54
      deps/v8/src/debug.cc
  43. 7
      deps/v8/src/debug.h
  44. 3
      deps/v8/src/factory.h
  45. 1
      deps/v8/src/fast-codegen.h
  46. 9
      deps/v8/src/flag-definitions.h
  47. 1
      deps/v8/src/frame-element.cc
  48. 33
      deps/v8/src/frame-element.h
  49. 1
      deps/v8/src/frames.cc
  50. 2
      deps/v8/src/globals.h
  51. 15
      deps/v8/src/handles.cc
  52. 8
      deps/v8/src/handles.h
  53. 12
      deps/v8/src/heap-inl.h
  54. 1
      deps/v8/src/heap-profiler.cc
  55. 2
      deps/v8/src/heap-profiler.h
  56. 119
      deps/v8/src/heap.cc
  57. 63
      deps/v8/src/heap.h
  58. 114
      deps/v8/src/ia32/assembler-ia32.cc
  59. 19
      deps/v8/src/ia32/assembler-ia32.h
  60. 95
      deps/v8/src/ia32/builtins-ia32.cc
  61. 1750
      deps/v8/src/ia32/codegen-ia32.cc
  62. 97
      deps/v8/src/ia32/codegen-ia32.h
  63. 9
      deps/v8/src/ia32/debug-ia32.cc
  64. 44
      deps/v8/src/ia32/disasm-ia32.cc
  65. 1
      deps/v8/src/ia32/fast-codegen-ia32.cc
  66. 37
      deps/v8/src/ia32/full-codegen-ia32.cc
  67. 279
      deps/v8/src/ia32/ic-ia32.cc
  68. 57
      deps/v8/src/ia32/macro-assembler-ia32.cc
  69. 31
      deps/v8/src/ia32/macro-assembler-ia32.h
  70. 49
      deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
  71. 15
      deps/v8/src/ia32/regexp-macro-assembler-ia32.h
  72. 1
      deps/v8/src/ia32/register-allocator-ia32.cc
  73. 19
      deps/v8/src/ia32/stub-cache-ia32.cc
  74. 154
      deps/v8/src/ia32/virtual-frame-ia32.cc
  75. 20
      deps/v8/src/ia32/virtual-frame-ia32.h
  76. 145
      deps/v8/src/ic.cc
  77. 31
      deps/v8/src/ic.h
  78. 1
      deps/v8/src/jsregexp.h
  79. 2
      deps/v8/src/jump-target-inl.h
  80. 13
      deps/v8/src/jump-target.cc
  81. 1
      deps/v8/src/jump-target.h
  82. 426
      deps/v8/src/liveedit-delay.js
  83. 404
      deps/v8/src/liveedit.cc
  84. 28
      deps/v8/src/liveedit.h
  85. 34
      deps/v8/src/log.cc
  86. 7
      deps/v8/src/log.h
  87. 10
      deps/v8/src/macros.py
  88. 8
      deps/v8/src/math.js
  89. 1
      deps/v8/src/messages.cc
  90. 1
      deps/v8/src/messages.js
  91. 45
      deps/v8/src/mips/codegen-mips.cc
  92. 14
      deps/v8/src/mips/codegen-mips.h
  93. 20
      deps/v8/src/mips/fast-codegen-mips.cc
  94. 5
      deps/v8/src/mips/full-codegen-mips.cc
  95. 25
      deps/v8/src/mips/ic-mips.cc
  96. 1
      deps/v8/src/mips/jump-target-mips.cc
  97. 31
      deps/v8/src/mips/macro-assembler-mips.cc
  98. 47
      deps/v8/src/mips/macro-assembler-mips.h
  99. 31
      deps/v8/src/mips/stub-cache-mips.cc
  100. 12
      deps/v8/src/mips/virtual-frame-mips.cc

2
bin/node-waf

@ -3,7 +3,7 @@ import os, sys
join = os.path.join
bindir = os.path.dirname(__file__)
bindir = os.path.dirname(os.path.realpath(__file__))
prefix = join(bindir, "..")
wafdir = join(prefix, "lib", "node")

19
deps/v8/ChangeLog

@ -1,3 +1,21 @@
2010-03-10: Version 2.1.3
Added API method for context-disposal notifications.
Added API method for accessing elements by integer index.
Added missing implementation of Uint32::Value and Value::IsUint32
API methods.
Added IsExecutionTerminating API method.
Disabled strict aliasing for GCC 4.4.
Fixed string-concatenation bug (issue 636).
Performance improvements on all platforms.
2010-02-23: Version 2.1.2
Fix a crash bug caused by wrong assert.
@ -6,6 +24,7 @@
Performance improvements on all platforms.
2010-02-19: Version 2.1.1
[ES5] Implemented Object.defineProperty.

40
deps/v8/SConstruct

@ -46,8 +46,8 @@ if ANDROID_TOP is None:
# on linux we need these compiler flags to avoid crashes in the v8 test suite
# and avoid dtoa.c strict aliasing issues
if os.environ.get('GCC_VERSION') == '44':
GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp']
GCC_DTOA_EXTRA_CCFLAGS = ['-fno-strict-aliasing']
GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp', '-fno-strict-aliasing']
GCC_DTOA_EXTRA_CCFLAGS = []
else:
GCC_EXTRA_CCFLAGS = []
GCC_DTOA_EXTRA_CCFLAGS = []
@ -255,8 +255,16 @@ LIBRARY_FLAGS = {
},
'msvcltcg:on': {
'CCFLAGS': ['/GL'],
'LINKFLAGS': ['/LTCG'],
'ARFLAGS': ['/LTCG'],
'pgo:off': {
'LINKFLAGS': ['/LTCG'],
},
'pgo:instrument': {
'LINKFLAGS': ['/LTCG:PGI']
},
'pgo:optimize': {
'LINKFLAGS': ['/LTCG:PGO']
}
}
}
}
@ -267,6 +275,7 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
'-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
@ -526,7 +535,15 @@ SAMPLE_FLAGS = {
},
'msvcltcg:on': {
'CCFLAGS': ['/GL'],
'LINKFLAGS': ['/LTCG'],
'pgo:off': {
'LINKFLAGS': ['/LTCG'],
},
},
'pgo:instrument': {
'LINKFLAGS': ['/LTCG:PGI']
},
'pgo:optimize': {
'LINKFLAGS': ['/LTCG:PGO']
}
},
'arch:ia32': {
@ -710,6 +727,11 @@ SIMPLE_OPTIONS = {
'values': ['arm', 'thumb2', 'none'],
'default': 'none',
'help': 'generate thumb2 instructions instead of arm instructions (default)'
},
'pgo': {
'values': ['off', 'instrument', 'optimize'],
'default': 'off',
'help': 'select profile guided optimization variant',
}
}
@ -797,6 +819,8 @@ def VerifyOptions(env):
Abort("Shared Object soname not applicable for Windows.")
if env['soname'] == 'on' and env['library'] == 'static':
Abort("Shared Object soname not applicable for static library.")
if env['os'] != 'win32' and env['pgo'] != 'off':
Abort("Profile guided optimization only supported on Windows.")
for (name, option) in SIMPLE_OPTIONS.iteritems():
if (not option.get('default')) and (name not in ARGUMENTS):
message = ("A value for option %s must be specified (%s)." %
@ -882,7 +906,7 @@ class BuildContext(object):
env['ENV'] = self.env_overrides
def PostprocessOptions(options):
def PostprocessOptions(options, os):
# Adjust architecture if the simulator option has been set
if (options['simulator'] != 'none') and (options['arch'] != options['simulator']):
if 'arch' in ARGUMENTS:
@ -893,6 +917,10 @@ def PostprocessOptions(options):
# Print a warning if profiling is enabled without profiling support
print "Warning: forcing profilingsupport on when prof is on"
options['profilingsupport'] = 'on'
if os == 'win32' and options['pgo'] != 'off' and options['msvcltcg'] == 'off':
if 'msvcltcg' in ARGUMENTS:
print "Warning: forcing msvcltcg on as it is required for pgo (%s)" % options['pgo']
options['msvcltcg'] = 'on'
if (options['armvariant'] == 'none' and options['arch'] == 'arm'):
options['armvariant'] = 'arm'
if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
@ -923,7 +951,7 @@ def BuildSpecific(env, mode, env_overrides):
options = {'mode': mode}
for option in SIMPLE_OPTIONS:
options[option] = env[option]
PostprocessOptions(options)
PostprocessOptions(options, env['os'])
context = BuildContext(options, env_overrides, samples=SplitList(env['sample']))

30
deps/v8/include/v8.h

@ -261,6 +261,10 @@ template <class T> class V8EXPORT_INLINE Handle {
return Handle<T>(T::Cast(*that));
}
template <class S> inline Handle<S> As() {
return Handle<S>::Cast(*this);
}
private:
T* val_;
};
@ -295,6 +299,10 @@ template <class T> class V8EXPORT_INLINE Local : public Handle<T> {
return Local<T>(T::Cast(*that));
}
template <class S> inline Local<S> As() {
return Local<S>::Cast(*this);
}
/** Create a local handle for the content of another handle.
* The referee is kept alive by the local handle even when
* the original handle is destroyed/disposed.
@ -368,6 +376,10 @@ template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> {
return Persistent<T>(T::Cast(*that));
}
template <class S> inline Persistent<S> As() {
return Persistent<S>::Cast(*this);
}
/**
* Creates a new persistent handle for an existing local or
* persistent handle.
@ -559,7 +571,7 @@ class V8EXPORT Script {
* object (typically a string) as the script's origin.
*
* \param source Script source code.
* \patam file_name file name object (typically a string) to be used
* \param file_name file name object (typically a string) to be used
* as the script's origin.
* \return Compiled script object (context independent; when run it
* will use the currently entered context).
@ -754,6 +766,11 @@ class V8EXPORT Value : public Data {
*/
bool IsInt32() const;
/**
* Returns true if this value is a 32-bit signed integer.
*/
bool IsUint32() const;
/**
* Returns true if this value is a Date.
*/
@ -1178,6 +1195,9 @@ class V8EXPORT Object : public Value {
Handle<Value> value,
PropertyAttribute attribs = None);
bool Set(uint32_t index,
Handle<Value> value);
// Sets a local property on this object bypassing interceptors and
// overriding accessors or read-only properties.
//
@ -1192,6 +1212,8 @@ class V8EXPORT Object : public Value {
Local<Value> Get(Handle<Value> key);
Local<Value> Get(uint32_t index);
// TODO(1245389): Replace the type-specific versions of these
// functions with generic ones that accept a Handle<Value> key.
bool Has(Handle<String> key);
@ -2485,9 +2507,11 @@ class V8EXPORT V8 {
/**
* Optional notification that a context has been disposed. V8 uses
* these notifications to guide the garbage collection heuristic.
* these notifications to guide the GC heuristic. Returns the number
* of context disposals - including this one - since the last time
* V8 had a chance to clean up.
*/
static void ContextDisposedNotification();
static int ContextDisposedNotification();
private:
V8();

2
deps/v8/src/SConscript

@ -97,7 +97,6 @@ SOURCES = {
token.cc
top.cc
unicode.cc
usage-analyzer.cc
utils.cc
v8-counters.cc
v8.cc
@ -249,6 +248,7 @@ math.js
messages.js
apinatives.js
debug-delay.js
liveedit-delay.js
mirror-delay.js
date-delay.js
regexp-delay.js

1
deps/v8/src/accessors.cc

@ -32,7 +32,6 @@
#include "factory.h"
#include "scopeinfo.h"
#include "top.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {

62
deps/v8/src/api.cc

@ -34,9 +34,11 @@
#include "debug.h"
#include "execution.h"
#include "global-handles.h"
#include "messages.h"
#include "platform.h"
#include "serialize.h"
#include "snapshot.h"
#include "top.h"
#include "utils.h"
#include "v8threads.h"
#include "version.h"
@ -1569,6 +1571,18 @@ bool Value::IsInt32() const {
}
bool Value::IsUint32() const {
if (IsDeadCheck("v8::Value::IsUint32()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
if (obj->IsNumber()) {
double value = obj->Number();
return i::FastUI2D(i::FastD2UI(value)) == value;
}
return false;
}
bool Value::IsDate() const {
if (IsDeadCheck("v8::Value::IsDate()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
@ -1974,6 +1988,23 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
}
bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
ON_BAILOUT("v8::Object::Set()", return false);
ENTER_V8;
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE();
i::Handle<i::Object> obj = i::SetElement(
self,
index,
value_obj);
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(false);
return true;
}
bool v8::Object::ForceSet(v8::Handle<Value> key,
v8::Handle<Value> value,
v8::PropertyAttribute attribs) {
@ -2022,6 +2053,18 @@ Local<Value> v8::Object::Get(v8::Handle<Value> key) {
}
Local<Value> v8::Object::Get(uint32_t index) {
ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>());
ENTER_V8;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
EXCEPTION_PREAMBLE();
i::Handle<i::Object> result = i::GetElement(self, index);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(Local<Value>());
return Utils::ToLocal(result);
}
Local<Value> v8::Object::GetPrototype() {
ON_BAILOUT("v8::Object::GetPrototype()", return Local<v8::Value>());
ENTER_V8;
@ -2614,7 +2657,7 @@ int String::WriteAscii(char* buffer, int start, int length) const {
StringTracker::RecordWrite(str);
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
str->TryFlattenIfNotFlat();
str->TryFlatten();
int end = length;
if ( (length == -1) || (length > str->length() - start) )
end = str->length() - start;
@ -2727,6 +2770,17 @@ int32_t Int32::Value() const {
}
uint32_t Uint32::Value() const {
if (IsDeadCheck("v8::Uint32::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
return i::Smi::cast(*obj)->value();
} else {
return static_cast<uint32_t>(obj->Number());
}
}
int v8::Object::InternalFieldCount() {
if (IsDeadCheck("v8::Object::InternalFieldCount()")) return 0;
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
@ -2820,9 +2874,9 @@ void v8::V8::LowMemoryNotification() {
}
void v8::V8::ContextDisposedNotification() {
if (!i::V8::IsRunning()) return;
i::Heap::NotifyContextDisposed();
int v8::V8::ContextDisposedNotification() {
if (!i::V8::IsRunning()) return 0;
return i::Heap::NotifyContextDisposed();
}

94
deps/v8/src/arm/assembler-arm.cc

@ -80,100 +80,6 @@ void CpuFeatures::Probe() {
}
// -----------------------------------------------------------------------------
// Implementation of Register and CRegister
Register no_reg = { -1 };
Register r0 = { 0 };
Register r1 = { 1 };
Register r2 = { 2 };
Register r3 = { 3 };
Register r4 = { 4 };
Register r5 = { 5 };
Register r6 = { 6 };
Register r7 = { 7 };
Register r8 = { 8 }; // Used as context register.
Register r9 = { 9 };
Register r10 = { 10 }; // Used as roots register.
Register fp = { 11 };
Register ip = { 12 };
Register sp = { 13 };
Register lr = { 14 };
Register pc = { 15 };
CRegister no_creg = { -1 };
CRegister cr0 = { 0 };
CRegister cr1 = { 1 };
CRegister cr2 = { 2 };
CRegister cr3 = { 3 };
CRegister cr4 = { 4 };
CRegister cr5 = { 5 };
CRegister cr6 = { 6 };
CRegister cr7 = { 7 };
CRegister cr8 = { 8 };
CRegister cr9 = { 9 };
CRegister cr10 = { 10 };
CRegister cr11 = { 11 };
CRegister cr12 = { 12 };
CRegister cr13 = { 13 };
CRegister cr14 = { 14 };
CRegister cr15 = { 15 };
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "sN:sM" is the same as "dN/2".
SwVfpRegister s0 = { 0 };
SwVfpRegister s1 = { 1 };
SwVfpRegister s2 = { 2 };
SwVfpRegister s3 = { 3 };
SwVfpRegister s4 = { 4 };
SwVfpRegister s5 = { 5 };
SwVfpRegister s6 = { 6 };
SwVfpRegister s7 = { 7 };
SwVfpRegister s8 = { 8 };
SwVfpRegister s9 = { 9 };
SwVfpRegister s10 = { 10 };
SwVfpRegister s11 = { 11 };
SwVfpRegister s12 = { 12 };
SwVfpRegister s13 = { 13 };
SwVfpRegister s14 = { 14 };
SwVfpRegister s15 = { 15 };
SwVfpRegister s16 = { 16 };
SwVfpRegister s17 = { 17 };
SwVfpRegister s18 = { 18 };
SwVfpRegister s19 = { 19 };
SwVfpRegister s20 = { 20 };
SwVfpRegister s21 = { 21 };
SwVfpRegister s22 = { 22 };
SwVfpRegister s23 = { 23 };
SwVfpRegister s24 = { 24 };
SwVfpRegister s25 = { 25 };
SwVfpRegister s26 = { 26 };
SwVfpRegister s27 = { 27 };
SwVfpRegister s28 = { 28 };
SwVfpRegister s29 = { 29 };
SwVfpRegister s30 = { 30 };
SwVfpRegister s31 = { 31 };
DwVfpRegister d0 = { 0 };
DwVfpRegister d1 = { 1 };
DwVfpRegister d2 = { 2 };
DwVfpRegister d3 = { 3 };
DwVfpRegister d4 = { 4 };
DwVfpRegister d5 = { 5 };
DwVfpRegister d6 = { 6 };
DwVfpRegister d7 = { 7 };
DwVfpRegister d8 = { 8 };
DwVfpRegister d9 = { 9 };
DwVfpRegister d10 = { 10 };
DwVfpRegister d11 = { 11 };
DwVfpRegister d12 = { 12 };
DwVfpRegister d13 = { 13 };
DwVfpRegister d14 = { 14 };
DwVfpRegister d15 = { 15 };
// -----------------------------------------------------------------------------
// Implementation of RelocInfo

172
deps/v8/src/arm/assembler-arm.h

@ -84,25 +84,24 @@ struct Register {
int code_;
};
extern Register no_reg;
extern Register r0;
extern Register r1;
extern Register r2;
extern Register r3;
extern Register r4;
extern Register r5;
extern Register r6;
extern Register r7;
extern Register r8;
extern Register r9;
extern Register r10;
extern Register fp;
extern Register ip;
extern Register sp;
extern Register lr;
extern Register pc;
const Register no_reg = { -1 };
const Register r0 = { 0 };
const Register r1 = { 1 };
const Register r2 = { 2 };
const Register r3 = { 3 };
const Register r4 = { 4 };
const Register r5 = { 5 };
const Register r6 = { 6 };
const Register r7 = { 7 };
const Register r8 = { 8 }; // Used as context register.
const Register r9 = { 9 };
const Register r10 = { 10 }; // Used as roots register.
const Register fp = { 11 };
const Register ip = { 12 };
const Register sp = { 13 };
const Register lr = { 14 };
const Register pc = { 15 };
// Single word VFP register.
struct SwVfpRegister {
@ -139,57 +138,57 @@ struct DwVfpRegister {
};
// Support for VFP registers s0 to s31 (d0 to d15).
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "s(N):s(N+1)" is the same as "d(N/2)".
extern SwVfpRegister s0;
extern SwVfpRegister s1;
extern SwVfpRegister s2;
extern SwVfpRegister s3;
extern SwVfpRegister s4;
extern SwVfpRegister s5;
extern SwVfpRegister s6;
extern SwVfpRegister s7;
extern SwVfpRegister s8;
extern SwVfpRegister s9;
extern SwVfpRegister s10;
extern SwVfpRegister s11;
extern SwVfpRegister s12;
extern SwVfpRegister s13;
extern SwVfpRegister s14;
extern SwVfpRegister s15;
extern SwVfpRegister s16;
extern SwVfpRegister s17;
extern SwVfpRegister s18;
extern SwVfpRegister s19;
extern SwVfpRegister s20;
extern SwVfpRegister s21;
extern SwVfpRegister s22;
extern SwVfpRegister s23;
extern SwVfpRegister s24;
extern SwVfpRegister s25;
extern SwVfpRegister s26;
extern SwVfpRegister s27;
extern SwVfpRegister s28;
extern SwVfpRegister s29;
extern SwVfpRegister s30;
extern SwVfpRegister s31;
extern DwVfpRegister d0;
extern DwVfpRegister d1;
extern DwVfpRegister d2;
extern DwVfpRegister d3;
extern DwVfpRegister d4;
extern DwVfpRegister d5;
extern DwVfpRegister d6;
extern DwVfpRegister d7;
extern DwVfpRegister d8;
extern DwVfpRegister d9;
extern DwVfpRegister d10;
extern DwVfpRegister d11;
extern DwVfpRegister d12;
extern DwVfpRegister d13;
extern DwVfpRegister d14;
extern DwVfpRegister d15;
const SwVfpRegister s0 = { 0 };
const SwVfpRegister s1 = { 1 };
const SwVfpRegister s2 = { 2 };
const SwVfpRegister s3 = { 3 };
const SwVfpRegister s4 = { 4 };
const SwVfpRegister s5 = { 5 };
const SwVfpRegister s6 = { 6 };
const SwVfpRegister s7 = { 7 };
const SwVfpRegister s8 = { 8 };
const SwVfpRegister s9 = { 9 };
const SwVfpRegister s10 = { 10 };
const SwVfpRegister s11 = { 11 };
const SwVfpRegister s12 = { 12 };
const SwVfpRegister s13 = { 13 };
const SwVfpRegister s14 = { 14 };
const SwVfpRegister s15 = { 15 };
const SwVfpRegister s16 = { 16 };
const SwVfpRegister s17 = { 17 };
const SwVfpRegister s18 = { 18 };
const SwVfpRegister s19 = { 19 };
const SwVfpRegister s20 = { 20 };
const SwVfpRegister s21 = { 21 };
const SwVfpRegister s22 = { 22 };
const SwVfpRegister s23 = { 23 };
const SwVfpRegister s24 = { 24 };
const SwVfpRegister s25 = { 25 };
const SwVfpRegister s26 = { 26 };
const SwVfpRegister s27 = { 27 };
const SwVfpRegister s28 = { 28 };
const SwVfpRegister s29 = { 29 };
const SwVfpRegister s30 = { 30 };
const SwVfpRegister s31 = { 31 };
const DwVfpRegister d0 = { 0 };
const DwVfpRegister d1 = { 1 };
const DwVfpRegister d2 = { 2 };
const DwVfpRegister d3 = { 3 };
const DwVfpRegister d4 = { 4 };
const DwVfpRegister d5 = { 5 };
const DwVfpRegister d6 = { 6 };
const DwVfpRegister d7 = { 7 };
const DwVfpRegister d8 = { 8 };
const DwVfpRegister d9 = { 9 };
const DwVfpRegister d10 = { 10 };
const DwVfpRegister d11 = { 11 };
const DwVfpRegister d12 = { 12 };
const DwVfpRegister d13 = { 13 };
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
// Coprocessor register
@ -210,23 +209,24 @@ struct CRegister {
};
extern CRegister no_creg;
extern CRegister cr0;
extern CRegister cr1;
extern CRegister cr2;
extern CRegister cr3;
extern CRegister cr4;
extern CRegister cr5;
extern CRegister cr6;
extern CRegister cr7;
extern CRegister cr8;
extern CRegister cr9;
extern CRegister cr10;
extern CRegister cr11;
extern CRegister cr12;
extern CRegister cr13;
extern CRegister cr14;
extern CRegister cr15;
const CRegister no_creg = { -1 };
const CRegister cr0 = { 0 };
const CRegister cr1 = { 1 };
const CRegister cr2 = { 2 };
const CRegister cr3 = { 3 };
const CRegister cr4 = { 4 };
const CRegister cr5 = { 5 };
const CRegister cr6 = { 6 };
const CRegister cr7 = { 7 };
const CRegister cr8 = { 8 };
const CRegister cr9 = { 9 };
const CRegister cr10 = { 10 };
const CRegister cr11 = { 11 };
const CRegister cr12 = { 12 };
const CRegister cr13 = { 13 };
const CRegister cr14 = { 14 };
const CRegister cr15 = { 15 };
// Coprocessor number

4
deps/v8/src/arm/builtins-arm.cc

@ -61,10 +61,10 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
}
// JumpToRuntime expects r0 to contain the number of arguments
// JumpToExternalReference expects r0 to contain the number of arguments
// including the receiver and the extra arguments.
__ add(r0, r0, Operand(num_extra_args + 1));
__ JumpToRuntime(ExternalReference(id));
__ JumpToExternalReference(ExternalReference(id));
}

347
deps/v8/src/arm/codegen-arm.cc

@ -31,6 +31,7 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "parser.h"
#include "register-allocator-inl.h"
#include "runtime.h"
@ -142,6 +143,7 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm)
void CodeGenerator::Generate(CompilationInfo* info) {
// Record the position for debugging purposes.
CodeForFunctionPosition(info->function());
Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
// Initialize state.
info_ = info;
@ -3321,6 +3323,25 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
// Generates the Math.pow method - currently just calls runtime.
void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
frame_->CallRuntime(Runtime::kMath_pow, 2);
frame_->EmitPush(r0);
}
// Generates the Math.sqrt method - currently just calls runtime.
void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
frame_->CallRuntime(Runtime::kMath_sqrt, 1);
frame_->EmitPush(r0);
}
// This should generate code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It is not yet implemented on ARM, so it always goes to the slow case.
@ -3404,6 +3425,44 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
Comment(masm_, "[ GenerateCharFromCode");
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
frame_->EmitPop(r0);
JumpTarget slow_case;
JumpTarget exit;
// Fast case of Heap::LookupSingleCharacterStringFromCode.
ASSERT(kSmiTag == 0);
ASSERT(kSmiShiftSize == 0);
ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
__ tst(r0, Operand(kSmiTagMask |
((~String::kMaxAsciiCharCode) << kSmiTagSize)));
slow_case.Branch(nz);
ASSERT(kSmiTag == 0);
__ mov(r1, Operand(Factory::single_character_string_cache()));
__ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(r1, MemOperand(r1, FixedArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r1, ip);
slow_case.Branch(eq);
frame_->EmitPush(r1);
exit.Jump();
slow_case.Bind();
frame_->EmitPush(r0);
frame_->CallRuntime(Runtime::kCharFromCode, 1);
frame_->EmitPush(r0);
exit.Bind();
}
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
@ -3625,6 +3684,24 @@ void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
// Load the argument on the stack and jump to the runtime.
Load(args->at(0));
frame_->CallRuntime(Runtime::kMath_sin, 1);
frame_->EmitPush(r0);
}
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
// Load the argument on the stack and jump to the runtime.
Load(args->at(0));
frame_->CallRuntime(Runtime::kMath_cos, 1);
frame_->EmitPush(r0);
}
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
@ -4489,7 +4566,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ bind(&gc);
__ push(cp);
__ push(r3);
__ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
__ TailCallRuntime(Runtime::kNewClosure, 2, 1);
}
@ -4539,7 +4616,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Need to collect. Call into runtime system.
__ bind(&gc);
__ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
__ TailCallRuntime(Runtime::kNewContext, 1, 1);
}
@ -4601,8 +4678,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&slow_case);
ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
__ TailCallRuntime(runtime, 3, 1);
__ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}
@ -6170,12 +6246,17 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
return Handle<Code>::null();
}
void StackCheckStub::Generate(MacroAssembler* masm) {
// Do tail-call to runtime routine. Runtime routines expect at least one
// argument, so give it a Smi.
__ mov(r0, Operand(Smi::FromInt(0)));
__ push(r0);
__ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
__ TailCallRuntime(Runtime::kStackGuard, 1, 1);
__ StubReturn(1);
}
@ -6784,7 +6865,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ push(r1);
__ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
__ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
@ -6887,7 +6968,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
@ -7178,6 +7259,170 @@ void StringStubBase::GenerateCopyCharactersLong(MacroAssembler* masm,
}
void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Register scratch5,
Label* not_found) {
// Register scratch3 is the general scratch register in this function.
Register scratch = scratch3;
// Make sure that both characters are not digits as such strings has a
// different hash algorithm. Don't try to look for these in the symbol table.
Label not_array_index;
__ sub(scratch, c1, Operand(static_cast<int>('0')));
__ cmp(scratch, Operand(static_cast<int>('9' - '0')));
__ b(hi, &not_array_index);
__ sub(scratch, c2, Operand(static_cast<int>('0')));
__ cmp(scratch, Operand(static_cast<int>('9' - '0')));
// If check failed combine both characters into single halfword.
// This is required by the contract of the method: code at the
// not_found branch expects this combination in c1 register
__ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
__ b(ls, not_found);
__ bind(&not_array_index);
// Calculate the two character string hash.
Register hash = scratch1;
GenerateHashInit(masm, hash, c1);
GenerateHashAddCharacter(masm, hash, c2);
GenerateHashGetHash(masm, hash);
// Collect the two characters in a register.
Register chars = c1;
__ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string.
// Load symbol table
// Load address of first element of the symbol table.
Register symbol_table = c2;
__ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
// Load undefined value
Register undefined = scratch4;
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
// Calculate capacity mask from the symbol table capacity.
Register mask = scratch2;
__ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
__ mov(mask, Operand(mask, ASR, 1));
__ sub(mask, mask, Operand(1));
// Calculate untagged address of the first element of the symbol table.
Register first_symbol_table_element = symbol_table;
__ add(first_symbol_table_element, symbol_table,
Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
// Registers
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string
// mask: capacity mask
// first_symbol_table_element: address of the first element of
// the symbol table
// scratch: -
// Perform a number of probes in the symbol table.
static const int kProbes = 4;
Label found_in_symbol_table;
Label next_probe[kProbes];
for (int i = 0; i < kProbes; i++) {
Register candidate = scratch5; // Scratch register contains candidate.
// Calculate entry in symbol table.
if (i > 0) {
__ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
} else {
__ mov(candidate, hash);
}
__ and_(candidate, candidate, Operand(mask));
// Load the entry from the symble table.
ASSERT_EQ(1, SymbolTable::kEntrySize);
__ ldr(candidate,
MemOperand(first_symbol_table_element,
candidate,
LSL,
kPointerSizeLog2));
// If entry is undefined no string with this hash can be found.
__ cmp(candidate, undefined);
__ b(eq, not_found);
// If length is not 2 the string is not a candidate.
__ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
__ cmp(scratch, Operand(2));
__ b(ne, &next_probe[i]);
// Check that the candidate is a non-external ascii string.
__ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
&next_probe[i]);
// Check if the two characters match.
// Assumes that word load is little endian.
__ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
__ cmp(chars, scratch);
__ b(eq, &found_in_symbol_table);
__ bind(&next_probe[i]);
}
// No matching 2 character string found by probing.
__ jmp(not_found);
// Scratch register contains result when we fall through to here.
Register result = scratch;
__ bind(&found_in_symbol_table);
if (!result.is(r0)) {
__ mov(r0, result);
}
}
// Emits the first step of the incremental string hash (one-at-a-time
// style mixing) over the first character code.
// On exit |hash| holds the partial hash; |character| is not modified.
void StringStubBase::GenerateHashInit(MacroAssembler* masm,
                                      Register hash,
                                      Register character) {
  // hash = character + (character << 10);
  __ add(hash, character, Operand(character, LSL, 10));
  // hash ^= hash >> 6;
  // NOTE(review): ASR (arithmetic shift) implements the ">>" above; this
  // matches a logical shift only while the sign bit of |hash| is clear --
  // confirm it agrees with the runtime string hasher.
  __ eor(hash, hash, Operand(hash, ASR, 6));
}
// Emits one incremental step of the string hash: folds the next
// character code in |character| into the running hash in |hash|.
// |character| is not modified.
void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
                                              Register hash,
                                              Register character) {
  // hash += character;
  __ add(hash, hash, Operand(character));
  // hash += hash << 10;
  __ add(hash, hash, Operand(hash, LSL, 10));
  // hash ^= hash >> 6;
  // NOTE(review): ASR used for ">>" as in GenerateHashInit -- confirm it
  // matches the runtime string hasher's (unsigned) shift.
  __ eor(hash, hash, Operand(hash, ASR, 6));
}
// Emits the finalization step of the incremental string hash: final
// avalanche mixing, then replacement of a zero hash by 27 (per the
// comment below, a computed hash must never be zero).
void StringStubBase::GenerateHashGetHash(MacroAssembler* masm,
                                         Register hash) {
  // hash += hash << 3;
  __ add(hash, hash, Operand(hash, LSL, 3));
  // hash ^= hash >> 11;
  // NOTE(review): ASR mirrors the other hash steps; confirm the runtime
  // hasher also performs an arithmetic (not logical) shift here.
  __ eor(hash, hash, Operand(hash, ASR, 11));
  // hash += hash << 15;
  // SetCC so the condition flags reflect whether the final hash is zero.
  __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
  // if (hash == 0) hash = 27;
  // BUG FIX: the conditional move must execute when the result IS zero
  // (eq), not when it is non-zero (nz). With nz, every non-zero hash was
  // clobbered to 27, so the symbol-table probe always hit one bucket.
  __ mov(hash, Operand(27), LeaveCC, eq);
}
void SubStringStub::Generate(MacroAssembler* masm) {
Label runtime;
@ -7213,11 +7458,14 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ sub(r2, r2, Operand(r3), SetCC);
__ b(mi, &runtime); // Fail if from > to.
// Handle sub-strings of length 2 and less in the runtime system.
// Special handling of sub-strings of length 1 and 2. One character strings
// are handled in the runtime system (looked up in the single character
// cache). Two character strings are looked for in the symbol cache.
__ cmp(r2, Operand(2));
__ b(le, &runtime);
__ b(lt, &runtime);
// r2: length
// r3: from index (untagged smi)
// r6: from (smi)
// r7: to (smi)
@ -7231,6 +7479,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r1: instance type
// r2: length
// r3: from index (untagged smi)
// r5: string
// r6: from (smi)
// r7: to (smi)
@ -7257,6 +7506,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r1: instance type.
// r2: length
// r3: from index (untagged smi)
// r5: string
// r6: from (smi)
// r7: to (smi)
@ -7266,6 +7516,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// r1: instance type.
// r2: result string length.
// r3: from index (untagged smi)
// r5: string.
// r6: from offset (smi)
// Check for flat ascii string.
@ -7274,6 +7525,35 @@ void SubStringStub::Generate(MacroAssembler* masm) {
ASSERT_EQ(0, kTwoByteStringTag);
__ b(eq, &non_ascii_flat);
Label result_longer_than_two;
__ cmp(r2, Operand(2));
__ b(gt, &result_longer_than_two);
// Sub string of length 2 requested.
// Get the two characters forming the sub string.
__ add(r5, r5, Operand(r3));
__ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
__ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
// Try to lookup two character string in symbol table.
Label make_two_character_string;
GenerateTwoCharacterSymbolTableProbe(masm, r3, r4, r1, r5, r6, r7, r9,
&make_two_character_string);
__ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
// r2: result string length.
// r3: two characters combined into halfword in little endian byte order.
__ bind(&make_two_character_string);
__ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
__ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
__ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
__ bind(&result_longer_than_two);
// Allocate the result.
__ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
@ -7331,7 +7611,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
}
@ -7422,7 +7702,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
__ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@ -7482,14 +7762,52 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r4: first string instance type (if string_check_)
// r5: second string instance type (if string_check_)
// Look at the length of the result of adding the two strings.
Label string_add_flat_result;
Label string_add_flat_result, longer_than_two;
// Adding two lengths can't overflow.
ASSERT(String::kMaxLength * 2 > String::kMaxLength);
__ add(r6, r2, Operand(r3));
// Use the runtime system when adding two one character strings, as it
// contains optimizations for this specific case using the symbol table.
__ cmp(r6, Operand(2));
__ b(eq, &string_add_runtime);
__ b(ne, &longer_than_two);
// Check that both strings are non-external ascii strings.
if (!string_check_) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
__ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
&string_add_runtime);
// Get the two characters forming the sub string.
__ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
__ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
Label make_two_character_string;
GenerateTwoCharacterSymbolTableProbe(masm, r2, r3, r6, r7, r4, r5, r9,
&make_two_character_string);
__ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
__ bind(&make_two_character_string);
// Resulting string has length 2 and first chars of two strings
// are combined into single halfword in r2 register.
// So we can fill resulting string without two loops by a single
// halfword store instruction (which assumes that processor is
// in a little endian mode)
__ mov(r6, Operand(2));
__ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
__ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
__ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
__ bind(&longer_than_two);
// Check if resulting string will be flat.
__ cmp(r6, Operand(String::kMinNonFlatLength));
__ b(lt, &string_add_flat_result);
@ -7568,6 +7886,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Both strings are sequential ASCII strings. We also know that they are
// short (since the sum of the lengths is less than kMinNonFlatLength).
// r6: length of resulting flat string
__ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
// Locate first character of result.
__ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
@ -7636,7 +7955,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&string_add_runtime);
__ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
}

43
deps/v8/src/arm/codegen-arm.h

@ -370,6 +370,9 @@ class CodeGenerator: public AstVisitor {
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
void GenerateCharFromCode(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@ -393,6 +396,16 @@ class CodeGenerator: public AstVisitor {
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
// Fast support for Math.pow().
void GenerateMathPow(ZoneList<Expression*>* args);
// Fast call to sine function.
void GenerateMathSin(ZoneList<Expression*>* args);
void GenerateMathCos(ZoneList<Expression*>* args);
// Fast support for Math.pow().
void GenerateMathSqrt(ZoneList<Expression*>* args);
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@ -554,6 +567,36 @@ class StringStubBase: public CodeStub {
Register scratch4,
Register scratch5,
int flags);
// Probe the symbol table for a two character string. If the string is
// not found by probing a jump to the label not_found is performed. This jump
// does not guarantee that the string is not in the symbol table. If the
// string is found the code falls through with the string in register r0.
// Contents of both c1 and c2 registers are modified. At the exit c1 is
// guaranteed to contain halfword with low and high bytes equal to
// initial contents of c1 and c2 respectively.
void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Register scratch5,
Label* not_found);
// Generate string hash.
void GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character);
void GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character);
void GenerateHashGetHash(MacroAssembler* masm,
Register hash);
};

4
deps/v8/src/arm/fast-codegen-arm.cc

@ -40,6 +40,7 @@ Register FastCodeGenerator::accumulator0() { return r0; }
Register FastCodeGenerator::accumulator1() { return r1; }
Register FastCodeGenerator::scratch0() { return r3; }
Register FastCodeGenerator::scratch1() { return r4; }
Register FastCodeGenerator::scratch2() { return r5; }
Register FastCodeGenerator::receiver_reg() { return r2; }
Register FastCodeGenerator::context_reg() { return cp; }
@ -100,7 +101,7 @@ void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
if (needs_write_barrier) {
__ mov(scratch1(), Operand(offset));
__ RecordWrite(scratch0(), scratch1(), ip);
__ RecordWrite(scratch0(), scratch1(), scratch2());
}
if (destination().is(accumulator1())) {
@ -180,6 +181,7 @@ void FastCodeGenerator::EmitBitOr() {
void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
ASSERT(info_ == NULL);
info_ = compilation_info;
Comment cmnt(masm_, "[ function compiled by fast code generator");
// Save the caller's frame pointer and set up our own.
Comment prologue_cmnt(masm(), ";; Prologue");

1
deps/v8/src/arm/full-codegen-arm.cc

@ -57,6 +57,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
ASSERT(info_ == NULL);
info_ = info;
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
if (mode == PRIMARY) {
int locals_count = scope()->num_stack_slots();

21
deps/v8/src/arm/ic-arm.cc

@ -494,7 +494,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ stm(db_w, sp, r2.bit() | r3.bit());
// Perform tail call to the entry.
__ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
__ TailCallExternalReference(ref, 2, 1);
}
@ -531,7 +532,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r2.bit() | r3.bit());
__ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
__ TailCallExternalReference(ref, 2, 1);
}
@ -545,7 +547,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r2.bit() | r3.bit());
__ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2, 1);
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
@ -662,7 +664,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ push(r0); // key
// Perform tail call to the entry.
__ TailCallRuntime(ExternalReference(
__ TailCallExternalReference(ExternalReference(
IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
__ bind(&slow);
@ -681,7 +683,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
__ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
__ TailCallExternalReference(ref, 3, 1);
}
@ -695,7 +698,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
__ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object
__ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
__ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
}
@ -854,7 +857,8 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
__ stm(db_w, sp, r2.bit() | r0.bit());
// Perform tail call to the entry.
__ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
__ TailCallExternalReference(ref, 3, 1);
}
@ -897,7 +901,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ push(receiver);
__ push(value);
__ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_ArrayLength)), 2, 1);
ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
__ TailCallExternalReference(ref, 2, 1);
__ bind(&miss);

64
deps/v8/src/arm/macro-assembler-arm.cc

@ -220,7 +220,7 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
// remembered set bits in the new space.
// object: heap object pointer (with tag)
// offset: offset to store location from the object
and_(scratch, object, Operand(Heap::NewSpaceMask()));
and_(scratch, object, Operand(ExternalReference::new_space_mask()));
cmp(scratch, Operand(ExternalReference::new_space_start()));
b(eq, &done);
@ -1234,19 +1234,26 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
}
void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
int num_arguments,
int result_size) {
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size) {
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
mov(r0, Operand(num_arguments));
JumpToRuntime(ext);
JumpToExternalReference(ext);
}
// Convenience wrapper: resolves the runtime function id to its
// ExternalReference and tail calls it with |num_arguments| arguments.
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                     int num_arguments,
                                     int result_size) {
  TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
}
void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
#if defined(__thumb__)
// Thumb mode builtin.
ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
@ -1410,15 +1417,12 @@ void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
int kFlatAsciiStringTag = ASCII_STRING_TYPE;
and_(scratch1, scratch1, Operand(kFlatAsciiStringMask));
and_(scratch2, scratch2, Operand(kFlatAsciiStringMask));
cmp(scratch1, Operand(kFlatAsciiStringTag));
// Ignore second test if first test failed.
cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
b(ne, failure);
JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
scratch2,
scratch1,
scratch2,
failure);
}
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
@ -1439,6 +1443,36 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
}
// Jumps to |failure| unless both instance-type bytes in |first| and
// |second| describe sequential (non-external) ASCII strings.
// |scratch1| and |scratch2| are clobbered; falls through on success.
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  // These are named like constants, so declare them const (they were
  // previously mutable ints).
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  // Mask out everything but string-ness, encoding and representation.
  and_(scratch1, first, Operand(kFlatAsciiStringMask));
  and_(scratch2, second, Operand(kFlatAsciiStringMask));
  cmp(scratch1, Operand(kFlatAsciiStringTag));
  // Ignore second test if first test failed.
  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
  b(ne, failure);
}
// Jumps to |failure| unless the instance-type byte in |type| describes
// a sequential (non-external) ASCII string. |scratch| is clobbered;
// falls through on success.
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                            Register scratch,
                                                            Label* failure) {
  // These are named like constants, so declare them const (they were
  // previously mutable ints).
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
  and_(scratch, type, Operand(kFlatAsciiStringMask));
  cmp(scratch, Operand(kFlatAsciiStringTag));
  b(ne, failure);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),

28
deps/v8/src/arm/macro-assembler-arm.h

@ -333,7 +333,6 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
// Eventually this should be used for all C calls.
void CallRuntime(Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
@ -344,14 +343,19 @@ class MacroAssembler: public Assembler {
int num_arguments);
// Tail call of a runtime routine (jump).
// Like JumpToRuntime, but also takes care of passing the number
// Like JumpToExternalReference, but also takes care of passing the number
// of parameters.
void TailCallRuntime(const ExternalReference& ext,
void TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size);
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
// Jump to a runtime routine.
void JumpToRuntime(const ExternalReference& builtin);
void JumpToExternalReference(const ExternalReference& builtin);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
@ -421,6 +425,22 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* not_flat_ascii_strings);
// Checks if both instance types are sequential ASCII strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialAscii(
Register first_object_instance_type,
Register second_object_instance_type,
Register scratch1,
Register scratch2,
Label* failure);
// Check if instance type is sequential ASCII string and jump to label if
// it is not.
void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
Register scratch,
Label* failure);
private:
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

4
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -765,7 +765,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label grow_failed;
// Call GrowStack(backtrack_stackpointer())
int num_arguments = 2;
static const int num_arguments = 2;
FrameAlign(num_arguments, r0);
__ mov(r0, backtrack_stackpointer());
__ add(r1, frame_pointer(), Operand(kStackHighEnd));
@ -966,7 +966,7 @@ void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
// Private methods:
void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
int num_arguments = 3;
static const int num_arguments = 3;
FrameAlign(num_arguments, scratch);
// RegExp code frame pointer.
__ mov(r2, frame_pointer());

12
deps/v8/src/arm/stub-cache-arm.cc

@ -297,7 +297,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ push(receiver_reg);
__ mov(r2, Operand(Handle<Map>(transition)));
__ stm(db_w, sp, r2.bit() | r0.bit());
__ TailCallRuntime(
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)),
3, 1);
return;
@ -529,7 +529,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(ref, 5, 1);
__ TailCallExternalReference(ref, 5, 1);
__ bind(&cleanup);
__ pop(scratch1);
@ -549,7 +549,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
__ TailCallRuntime(ref, 5, 1);
__ TailCallExternalReference(ref, 5, 1);
}
private:
@ -719,7 +719,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(load_callback_property, 5, 1);
__ TailCallExternalReference(load_callback_property, 5, 1);
return true;
}
@ -1204,7 +1204,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
__ TailCallRuntime(store_callback_property, 4, 1);
__ TailCallExternalReference(store_callback_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
@ -1251,7 +1251,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
__ TailCallRuntime(store_ic_property, 3, 1);
__ TailCallExternalReference(store_ic_property, 3, 1);
// Handle store cache miss.
__ bind(&miss);

21
deps/v8/src/arm/virtual-frame-arm.cc

@ -35,27 +35,8 @@
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// VirtualFrame implementation.
#define __ ACCESS_MASM(masm())
// On entry to a function, the virtual frame already contains the
// receiver and the parameters. All initial frame elements are in
// memory.
VirtualFrame::VirtualFrame()
    : elements_(parameter_count() + local_count() + kPreallocatedElements),
      stack_pointer_(parameter_count()) {  // 0-based index of TOS.
  // Receiver plus parameters start life as in-memory elements with
  // unknown number info.
  for (int i = 0; i <= stack_pointer_; i++) {
    elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
  }
  // Initially no register holds any frame element.
  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
    register_locations_[i] = kIllegalIndex;
  }
}
void VirtualFrame::SyncElementBelowStackPointer(int index) {
UNREACHABLE();
}
@ -314,7 +295,7 @@ void VirtualFrame::EmitPop(Register reg) {
void VirtualFrame::EmitPush(Register reg) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
stack_pointer_++;
__ push(reg);
}

6
deps/v8/src/arm/virtual-frame-arm.h

@ -59,7 +59,7 @@ class VirtualFrame : public ZoneObject {
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
VirtualFrame();
inline VirtualFrame();
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
@ -69,7 +69,7 @@ class VirtualFrame : public ZoneObject {
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index,
NumberInfo::Type info = NumberInfo::kUnknown);
NumberInfo info = NumberInfo::Unknown());
// The number of elements on the virtual frame.
int element_count() { return elements_.length(); }
@ -344,7 +344,7 @@ class VirtualFrame : public ZoneObject {
void EmitPushMultiple(int count, int src_regs);
// Push an element on the virtual frame.
inline void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
inline void Push(Handle<Object> value);
inline void Push(Smi* value);

2
deps/v8/src/array.js

@ -1149,6 +1149,8 @@ function SetupArray() {
ArrayReduce: 1,
ArrayReduceRight: 1
});
%FinishArrayPrototypeSetup($Array.prototype);
}

10
deps/v8/src/assembler.cc

@ -579,6 +579,11 @@ ExternalReference ExternalReference::random_positive_smi_function() {
}
// Address of the transcendental cache's backing array, exposed so
// generated code can probe it.
ExternalReference ExternalReference::transcendental_cache_array_address() {
  return ExternalReference(TranscendentalCache::cache_array_address());
}
// Address of the KeyedLookupCache keys table (static data in the keyed
// lookup cache).
ExternalReference ExternalReference::keyed_lookup_cache_keys() {
  return ExternalReference(KeyedLookupCache::keys_address());
}
@ -619,6 +624,11 @@ ExternalReference ExternalReference::new_space_start() {
}
// The new-space address mask as an ExternalReference.
// NOTE(review): an integral mask is smuggled through an Address via
// reinterpret_cast -- callers must treat this as an immediate value,
// never as a pointer to dereference; confirm at use sites.
ExternalReference ExternalReference::new_space_mask() {
  return ExternalReference(reinterpret_cast<Address>(Heap::NewSpaceMask()));
}
// Address of the new-space allocation top pointer (used for inline
// allocation in generated code).
ExternalReference ExternalReference::new_space_allocation_top_address() {
  return ExternalReference(Heap::NewSpaceAllocationTopAddress());
}

3
deps/v8/src/assembler.h

@ -37,7 +37,6 @@
#include "runtime.h"
#include "top.h"
#include "zone-inl.h"
#include "token.h"
namespace v8 {
@ -400,6 +399,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference perform_gc_function();
static ExternalReference random_positive_smi_function();
static ExternalReference transcendental_cache_array_address();
// Static data in the keyed lookup cache.
static ExternalReference keyed_lookup_cache_keys();
@ -427,6 +427,7 @@ class ExternalReference BASE_EMBEDDED {
// Static variable Heap::NewSpaceStart()
static ExternalReference new_space_start();
static ExternalReference new_space_mask();
static ExternalReference heap_always_allocate_scope_depth();
// Used for fast allocation in generated code.

5
deps/v8/src/ast.cc

@ -67,8 +67,6 @@ VariableProxy::VariableProxy(Handle<String> name,
inside_with_(inside_with) {
// names must be canonicalized for fast equality checks
ASSERT(name->IsSymbol());
// at least one access, otherwise no need for a VariableProxy
var_uses_.RecordRead(1);
}
@ -87,8 +85,7 @@ void VariableProxy::BindTo(Variable* var) {
// eval() etc. Const-ness and variable declarations are a complete mess
// in JS. Sigh...
var_ = var;
var->var_uses()->RecordUses(&var_uses_);
var->obj_uses()->RecordUses(&obj_uses_);
var->set_is_used(true);
}

61
deps/v8/src/ast.h

@ -117,6 +117,9 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
class AstNode: public ZoneObject {
public:
static const int kNoNumber = -1;
AstNode() : num_(kNoNumber) {}
virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
@ -141,6 +144,13 @@ class AstNode: public ZoneObject {
virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
virtual CompareOperation* AsCompareOperation() { return NULL; }
int num() { return num_; }
void set_num(int n) { num_ = n; }
private:
// Support for ast node numbering.
int num_;
};
@ -181,9 +191,10 @@ class Expression: public AstNode {
kTestValue
};
static const int kNoLabel = -1;
Expression() : num_(kNoLabel), def_(NULL), defined_vars_(NULL) {}
Expression()
: bitfields_(0),
def_(NULL),
defined_vars_(NULL) {}
virtual Expression* AsExpression() { return this; }
@ -211,11 +222,6 @@ class Expression: public AstNode {
// Static type information for this expression.
StaticType* type() { return &type_; }
int num() { return num_; }
// AST node numbering ordered by evaluation order.
void set_num(int n) { num_ = n; }
// Data flow information.
DefinitionInfo* var_def() { return def_; }
void set_var_def(DefinitionInfo* def) { def_ = def; }
@ -225,11 +231,36 @@ class Expression: public AstNode {
defined_vars_ = defined_vars;
}
// AST analysis results
// True if the expression rooted at this node can be compiled by the
// side-effect free compiler.
bool side_effect_free() { return SideEffectFreeField::decode(bitfields_); }
void set_side_effect_free(bool is_side_effect_free) {
bitfields_ &= ~SideEffectFreeField::mask();
bitfields_ |= SideEffectFreeField::encode(is_side_effect_free);
}
// Will ToInt32 (ECMA 262-3 9.5) or ToUint32 (ECMA 262-3 9.6)
// be applied to the value of this expression?
// If so, we may be able to optimize the calculation of the value.
bool to_int32() { return ToInt32Field::decode(bitfields_); }
void set_to_int32(bool to_int32) {
bitfields_ &= ~ToInt32Field::mask();
bitfields_ |= ToInt32Field::encode(to_int32);
}
private:
uint32_t bitfields_;
StaticType type_;
int num_;
DefinitionInfo* def_;
ZoneList<DefinitionInfo*>* defined_vars_;
// Using template BitField<type, start, size>.
class SideEffectFreeField : public BitField<bool, 0, 1> {};
class ToInt32Field : public BitField<bool, 1, 1> {};
};
@ -931,6 +962,10 @@ class VariableProxy: public Expression {
return var()->is_global() || var()->rewrite()->IsLeaf();
}
// Reading from a mutable variable is a side effect, but 'this' is
// immutable.
virtual bool IsTrivial() { return is_this(); }
bool IsVariable(Handle<String> n) {
return !is_this() && name().is_identical_to(n);
}
@ -942,8 +977,6 @@ class VariableProxy: public Expression {
Handle<String> name() const { return name_; }
Variable* var() const { return var_; }
UseCount* var_uses() { return &var_uses_; }
UseCount* obj_uses() { return &obj_uses_; }
bool is_this() const { return is_this_; }
bool inside_with() const { return inside_with_; }
@ -956,10 +989,6 @@ class VariableProxy: public Expression {
bool is_this_;
bool inside_with_;
// VariableProxy usage info.
UseCount var_uses_; // uses of the variable value
UseCount obj_uses_; // uses of the object the variable points to
VariableProxy(Handle<String> name, bool is_this, bool inside_with);
explicit VariableProxy(bool is_this);
@ -1018,6 +1047,8 @@ class Slot: public Expression {
virtual bool IsLeaf() { return true; }
bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
// Accessors
Variable* var() const { return var_; }
Type type() const { return type_; }

13
deps/v8/src/bootstrapper.cc

@ -1050,6 +1050,19 @@ bool Genesis::InstallNatives() {
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
global_context()->set_empty_script(*script);
}
{
// Builtin function for OpaqueReference -- a JSValue-based object,
// that keeps its field isolated from JavaScript code. It may store
// objects, that JavaScript code may not access.
Handle<JSFunction> opaque_reference_fun =
InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE,
JSValue::kSize, Top::initial_object_prototype(),
Builtins::Illegal, false);
Handle<JSObject> prototype =
Factory::NewJSObject(Top::object_function(), TENURED);
SetPrototype(opaque_reference_fun, prototype);
global_context()->set_opaque_reference_function(*opaque_reference_fun);
}
if (FLAG_natives_file == NULL) {
// Without natives file, install default natives.

376
deps/v8/src/builtins.cc

@ -242,6 +242,109 @@ BUILTIN(ArrayCodeGeneric) {
}
// Allocates a bare JSArray using the global context's Array function.
// Returns a Failure object on allocation failure (callers must check
// IsFailure()).
static Object* AllocateJSArray() {
  JSFunction* array_function =
      Top::context()->global_context()->array_function();
  // Heap::AllocateJSObject already returns a Failure on failure, so the
  // result can be handed straight back to the caller. (The previous
  // code checked IsFailure() and then returned the same value on both
  // branches -- a dead branch.)
  return Heap::AllocateJSObject(array_function);
}
// Allocates a JSArray with length 0 whose elements field is the
// canonical empty fixed array. Returns a Failure on allocation failure.
static Object* AllocateEmptyJSArray() {
  Object* maybe_array = AllocateJSArray();
  if (maybe_array->IsFailure()) return maybe_array;
  JSArray* empty_array = JSArray::cast(maybe_array);
  empty_array->set_length(Smi::FromInt(0));
  empty_array->set_elements(Heap::empty_fixed_array());
  return empty_array;
}
// Copies |len| pointer-sized elements from src[src_index..] into
// dst[dst_index..]. The two arrays must be distinct (memcpy semantics);
// use MoveElements when source and destination may overlap. Records the
// written region for the write barrier when |dst| requires it. Must run
// with allocation disabled (|no_gc|).
static void CopyElements(AssertNoAllocation* no_gc,
                         FixedArray* dst,
                         int dst_index,
                         FixedArray* src,
                         int src_index,
                         int len) {
  ASSERT(dst != src);  // Use MoveElements instead.
  memcpy(dst->data_start() + dst_index,
         src->data_start() + src_index,
         len * kPointerSize);
  // Only notify the GC about the stores when the destination's mode
  // demands it (e.g. skipped for new-space destinations).
  WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
  if (mode == UPDATE_WRITE_BARRIER) {
    Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
  }
}
// Moves |len| pointer-sized elements from src[src_index..] to
// dst[dst_index..]; source and destination may overlap (memmove
// semantics). Records the written region for the write barrier when
// |dst| requires it. Must run with allocation disabled (|no_gc|).
static void MoveElements(AssertNoAllocation* no_gc,
                         FixedArray* dst,
                         int dst_index,
                         FixedArray* src,
                         int src_index,
                         int len) {
  memmove(dst->data_start() + dst_index,
          src->data_start() + src_index,
          len * kPointerSize);
  // Only notify the GC about the stores when the destination's mode
  // demands it (e.g. skipped for new-space destinations).
  WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
  if (mode == UPDATE_WRITE_BARRIER) {
    Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
  }
}
// Overwrites dst[from..to) with the hole sentinel value.
// NOTE(review): no write barrier is issued here -- presumably safe
// because the hole value is a GC root; confirm before reusing this
// helper elsewhere.
static void FillWithHoles(FixedArray* dst, int from, int to) {
  MemsetPointer(dst->data_start() + from, Heap::the_hole_value(), to - from);
}
// Returns true when Array.prototype, its hidden prototype, and
// Object.prototype all have the canonical empty backing store, i.e. an
// element lookup that misses the receiver cannot be satisfied anywhere
// on the default prototype chain.
static bool ArrayPrototypeHasNoElements() {
  // This method depends on non writability of Object and Array prototype
  // fields.
  Context* global_context = Top::context()->global_context();
  // Array.prototype
  JSObject* proto =
      JSObject::cast(global_context->array_function()->prototype());
  if (proto->elements() != Heap::empty_fixed_array()) return false;
  // Hidden prototype
  proto = JSObject::cast(proto->GetPrototype());
  ASSERT(proto->elements() == Heap::empty_fixed_array());
  // Object.prototype
  proto = JSObject::cast(proto->GetPrototype());
  // Bail out if the chain has been re-rooted away from the default
  // Object.prototype.
  if (proto != global_context->initial_object_prototype()) return false;
  if (proto->elements() != Heap::empty_fixed_array()) return false;
  ASSERT(proto->GetPrototype()->IsNull());
  return true;
}
static Object* CallJsBuiltin(const char* name,
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
HandleScope handleScope;
Handle<Object> js_builtin =
GetProperty(Handle<JSObject>(Top::global_context()->builtins()),
name);
ASSERT(js_builtin->IsJSFunction());
Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
Vector<Object**> argv(Vector<Object**>::New(args.length() - 1));
int n_args = args.length() - 1;
for (int i = 0; i < n_args; i++) {
argv[i] = &args[i + 1];
}
bool pending_exception = false;
Handle<Object> result = Execution::Call(function,
args.receiver(),
n_args,
argv.start(),
&pending_exception);
argv.Dispose();
if (pending_exception) return Failure::Exception();
return *result;
}
BUILTIN(ArrayPush) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
@ -261,22 +364,21 @@ BUILTIN(ArrayPush) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
Object* obj = Heap::AllocateUninitializedFixedArray(capacity);
if (obj->IsFailure()) return obj;
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
FixedArray* new_elms = FixedArray::cast(obj);
WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc);
// Fill out the new array with old elements.
for (int i = 0; i < len; i++) new_elms->set(i, elms->get(i), mode);
CopyElements(&no_gc, new_elms, 0, elms, 0, len);
FillWithHoles(new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
}
// Add the provided values.
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
// Add the provided values.
for (int index = 0; index < to_add; index++) {
elms->set(index + len, args[index + 1], mode);
}
@ -290,10 +392,9 @@ BUILTIN(ArrayPush) {
BUILTIN(ArrayPop) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
Object* undefined = Heap::undefined_value();
int len = Smi::cast(array->length())->value();
if (len == 0) return undefined;
if (len == 0) return Heap::undefined_value();
// Get top element
FixedArray* elms = FixedArray::cast(array->elements());
@ -318,41 +419,28 @@ BUILTIN(ArrayPop) {
}
static Object* GetElementToMove(uint32_t index,
FixedArray* elms,
JSObject* prototype) {
Object* e = elms->get(index);
if (e->IsTheHole() && prototype->HasElement(index)) {
e = prototype->GetElement(index);
BUILTIN(ArrayShift) {
if (!ArrayPrototypeHasNoElements()) {
return CallJsBuiltin("ArrayShift", args);
}
return e;
}
BUILTIN(ArrayShift) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return Heap::undefined_value();
// Fetch the prototype.
JSFunction* array_function =
Top::context()->global_context()->array_function();
JSObject* prototype = JSObject::cast(array_function->prototype());
FixedArray* elms = FixedArray::cast(array->elements());
// Get first element
Object* first = elms->get(0);
if (first->IsTheHole()) {
first = prototype->GetElement(0);
first = Heap::undefined_value();
}
// Shift the elements.
for (int i = 0; i < len - 1; i++) {
elms->set(i, GetElementToMove(i + 1, elms, prototype));
}
AssertNoAllocation no_gc;
MoveElements(&no_gc, elms, 0, elms, 1, len - 1);
elms->set(len - 1, Heap::the_hole_value());
// Set the length.
@ -363,6 +451,10 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
if (!ArrayPrototypeHasNoElements()) {
return CallJsBuiltin("ArrayUnshift", args);
}
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
@ -379,38 +471,22 @@ BUILTIN(ArrayUnshift) {
FixedArray* elms = FixedArray::cast(array->elements());
// Fetch the prototype.
JSFunction* array_function =
Top::context()->global_context()->array_function();
JSObject* prototype = JSObject::cast(array_function->prototype());
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
Object* obj = Heap::AllocateUninitializedFixedArray(capacity);
if (obj->IsFailure()) return obj;
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
FixedArray* new_elms = FixedArray::cast(obj);
WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc);
// Fill out the new array with old elements.
for (int i = 0; i < len; i++)
new_elms->set(to_add + i,
GetElementToMove(i, elms, prototype),
mode);
CopyElements(&no_gc, new_elms, to_add, elms, 0, len);
FillWithHoles(new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
} else {
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
// Move elements to the right
for (int i = 0; i < len; i++) {
elms->set(new_length - i - 1,
GetElementToMove(len - i - 1, elms, prototype),
mode);
}
MoveElements(&no_gc, elms, to_add, elms, 0, len);
}
// Add the provided values.
@ -426,32 +502,11 @@ BUILTIN(ArrayUnshift) {
}
static Object* CallJsBuiltin(const char* name,
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
HandleScope handleScope;
Handle<Object> js_builtin =
GetProperty(Handle<JSObject>(Top::global_context()->builtins()),
name);
ASSERT(js_builtin->IsJSFunction());
Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
Vector<Object**> argv(Vector<Object**>::New(args.length() - 1));
int n_args = args.length() - 1;
for (int i = 0; i < n_args; i++) {
argv[i] = &args[i + 1];
BUILTIN(ArraySlice) {
if (!ArrayPrototypeHasNoElements()) {
return CallJsBuiltin("ArraySlice", args);
}
bool pending_exception = false;
Handle<Object> result = Execution::Call(function,
args.receiver(),
n_args,
argv.start(),
&pending_exception);
if (pending_exception) return Failure::Exception();
return *result;
}
BUILTIN(ArraySlice) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
@ -460,21 +515,21 @@ BUILTIN(ArraySlice) {
int n_arguments = args.length() - 1;
// Note carefully choosen defaults---if argument is missing,
// it's undefined which gets converted to 0 for relativeStart
// and to len for relativeEnd.
int relativeStart = 0;
int relativeEnd = len;
// it's undefined which gets converted to 0 for relative_start
// and to len for relative_end.
int relative_start = 0;
int relative_end = len;
if (n_arguments > 0) {
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relativeStart = Smi::cast(arg1)->value();
relative_start = Smi::cast(arg1)->value();
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin("ArraySlice", args);
}
if (n_arguments > 1) {
Object* arg2 = args[2];
if (arg2->IsSmi()) {
relativeEnd = Smi::cast(arg2)->value();
relative_end = Smi::cast(arg2)->value();
} else if (!arg2->IsUndefined()) {
return CallJsBuiltin("ArraySlice", args);
}
@ -482,43 +537,31 @@ BUILTIN(ArraySlice) {
}
// ECMAScript 232, 3rd Edition, Section 15.4.4.10, step 6.
int k = (relativeStart < 0) ? Max(len + relativeStart, 0)
: Min(relativeStart, len);
int k = (relative_start < 0) ? Max(len + relative_start, 0)
: Min(relative_start, len);
// ECMAScript 232, 3rd Edition, Section 15.4.4.10, step 8.
int final = (relativeEnd < 0) ? Max(len + relativeEnd, 0)
: Min(relativeEnd, len);
int final = (relative_end < 0) ? Max(len + relative_end, 0)
: Min(relative_end, len);
// Calculate the length of result array.
int result_len = final - k;
if (result_len < 0) {
result_len = 0;
if (result_len <= 0) {
return AllocateEmptyJSArray();
}
JSFunction* array_function =
Top::context()->global_context()->array_function();
Object* result = Heap::AllocateJSObject(array_function);
Object* result = AllocateJSArray();
if (result->IsFailure()) return result;
JSArray* result_array = JSArray::cast(result);
result = Heap::AllocateFixedArrayWithHoles(result_len);
result = Heap::AllocateUninitializedFixedArray(result_len);
if (result->IsFailure()) return result;
FixedArray* result_elms = FixedArray::cast(result);
FixedArray* elms = FixedArray::cast(array->elements());
// Fetch the prototype.
JSObject* prototype = JSObject::cast(array_function->prototype());
AssertNoAllocation no_gc;
WriteBarrierMode mode = result_elms->GetWriteBarrierMode(no_gc);
// Fill newly created array.
for (int i = 0; i < result_len; i++) {
result_elms->set(i,
GetElementToMove(k + i, elms, prototype),
mode);
}
CopyElements(&no_gc, result_elms, 0, elms, k, result_len);
// Set elements.
result_array->set_elements(result_elms);
@ -530,6 +573,10 @@ BUILTIN(ArraySlice) {
BUILTIN(ArraySplice) {
if (!ArrayPrototypeHasNoElements()) {
return CallJsBuiltin("ArraySplice", args);
}
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
@ -546,118 +593,111 @@ BUILTIN(ArraySplice) {
return Heap::undefined_value();
}
int relativeStart = 0;
int relative_start = 0;
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relativeStart = Smi::cast(arg1)->value();
relative_start = Smi::cast(arg1)->value();
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin("ArraySplice", args);
}
int actualStart = (relativeStart < 0) ? Max(len + relativeStart, 0)
: Min(relativeStart, len);
int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
: Min(relative_start, len);
// SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
// given differently from when an undefined delete count is given.
// This does not follow ECMA-262, but we do the same for
// compatibility.
int deleteCount = len;
int delete_count = len;
if (n_arguments > 1) {
Object* arg2 = args[2];
if (arg2->IsSmi()) {
deleteCount = Smi::cast(arg2)->value();
delete_count = Smi::cast(arg2)->value();
} else {
return CallJsBuiltin("ArraySplice", args);
}
}
int actualDeleteCount = Min(Max(deleteCount, 0), len - actualStart);
JSFunction* array_function =
Top::context()->global_context()->array_function();
// Allocate result array.
Object* result = Heap::AllocateJSObject(array_function);
if (result->IsFailure()) return result;
JSArray* result_array = JSArray::cast(result);
result = Heap::AllocateFixedArrayWithHoles(actualDeleteCount);
if (result->IsFailure()) return result;
FixedArray* result_elms = FixedArray::cast(result);
int actual_delete_count = Min(Max(delete_count, 0), len - actual_start);
FixedArray* elms = FixedArray::cast(array->elements());
// Fetch the prototype.
JSObject* prototype = JSObject::cast(array_function->prototype());
JSArray* result_array = NULL;
if (actual_delete_count == 0) {
Object* result = AllocateEmptyJSArray();
if (result->IsFailure()) return result;
result_array = JSArray::cast(result);
} else {
// Allocate result array.
Object* result = AllocateJSArray();
if (result->IsFailure()) return result;
result_array = JSArray::cast(result);
AssertNoAllocation no_gc;
WriteBarrierMode mode = result_elms->GetWriteBarrierMode(no_gc);
result = Heap::AllocateUninitializedFixedArray(actual_delete_count);
if (result->IsFailure()) return result;
FixedArray* result_elms = FixedArray::cast(result);
// Fill newly created array.
for (int k = 0; k < actualDeleteCount; k++) {
result_elms->set(k,
GetElementToMove(actualStart + k, elms, prototype),
mode);
}
AssertNoAllocation no_gc;
// Fill newly created array.
CopyElements(&no_gc,
result_elms, 0,
elms, actual_start,
actual_delete_count);
// Set elements.
result_array->set_elements(result_elms);
// Set elements.
result_array->set_elements(result_elms);
// Set the length.
result_array->set_length(Smi::FromInt(actualDeleteCount));
// Set the length.
result_array->set_length(Smi::FromInt(actual_delete_count));
}
int itemCount = (n_arguments > 1) ? (n_arguments - 2) : 0;
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
int new_length = len - actualDeleteCount + itemCount;
int new_length = len - actual_delete_count + item_count;
mode = elms->GetWriteBarrierMode(no_gc);
if (itemCount < actualDeleteCount) {
if (item_count < actual_delete_count) {
// Shrink the array.
for (int k = actualStart; k < (len - actualDeleteCount); k++) {
elms->set(k + itemCount,
GetElementToMove(k + actualDeleteCount, elms, prototype),
mode);
}
for (int k = len; k > new_length; k--) {
elms->set(k - 1, Heap::the_hole_value());
}
} else if (itemCount > actualDeleteCount) {
AssertNoAllocation no_gc;
MoveElements(&no_gc,
elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
FillWithHoles(elms, new_length, len);
} else if (item_count > actual_delete_count) {
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT((itemCount - actualDeleteCount) <= (Smi::kMaxValue - len));
FixedArray* source_elms = elms;
ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
// Check if array need to grow.
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
Object* obj = Heap::AllocateUninitializedFixedArray(capacity);
if (obj->IsFailure()) return obj;
FixedArray* new_elms = FixedArray::cast(obj);
mode = new_elms->GetWriteBarrierMode(no_gc);
// Copy the part before actualStart as is.
for (int k = 0; k < actualStart; k++) {
new_elms->set(k, elms->get(k), mode);
}
AssertNoAllocation no_gc;
// Copy the part before actual_start as is.
CopyElements(&no_gc, new_elms, 0, elms, 0, actual_start);
CopyElements(&no_gc,
new_elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
FillWithHoles(new_elms, new_length, capacity);
source_elms = elms;
elms = new_elms;
array->set_elements(elms);
}
for (int k = len - actualDeleteCount; k > actualStart; k--) {
elms->set(k + itemCount - 1,
GetElementToMove(k + actualDeleteCount - 1,
source_elms,
prototype),
mode);
} else {
AssertNoAllocation no_gc;
MoveElements(&no_gc,
elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
}
}
for (int k = actualStart; k < actualStart + itemCount; k++) {
elms->set(k, args[3 + k - actualStart], mode);
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int k = actual_start; k < actual_start + item_count; k++) {
elms->set(k, args[3 + k - actual_start], mode);
}
// Set the length.

15
deps/v8/src/code-stubs.cc

@ -83,6 +83,11 @@ void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
}
int CodeStub::GetCodeKind() {
return Code::STUB;
}
Handle<Code> CodeStub::GetCode() {
Code* code;
if (!FindCodeInCache(&code)) {
@ -97,7 +102,10 @@ Handle<Code> CodeStub::GetCode() {
masm.GetCode(&desc);
// Copy the generated code into a heap object.
Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
Code::Flags flags = Code::ComputeFlags(
static_cast<Code::Kind>(GetCodeKind()),
InLoop(),
GetICState());
Handle<Code> new_object =
Factory::NewCode(desc, NULL, flags, masm.CodeObject());
RecordCodeGeneration(*new_object, &masm);
@ -132,7 +140,10 @@ Object* CodeStub::TryGetCode() {
masm.GetCode(&desc);
// Try to copy the generated code into a heap object.
Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
Code::Flags flags = Code::ComputeFlags(
static_cast<Code::Kind>(GetCodeKind()),
InLoop(),
GetICState());
Object* new_object =
Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
if (new_object->IsFailure()) return new_object;

11
deps/v8/src/code-stubs.h

@ -28,6 +28,8 @@
#ifndef V8_CODE_STUBS_H_
#define V8_CODE_STUBS_H_
#include "globals.h"
namespace v8 {
namespace internal {
@ -48,6 +50,7 @@ namespace internal {
V(FastNewClosure) \
V(FastNewContext) \
V(FastCloneShallowArray) \
V(TranscendentalCache) \
V(GenericUnaryOp) \
V(RevertToNumber) \
V(ToBoolean) \
@ -138,6 +141,14 @@ class CodeStub BASE_EMBEDDED {
// lazily generated function should be fully optimized or not.
virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
// GenericBinaryOpStub needs to override this.
virtual int GetCodeKind();
// GenericBinaryOpStub needs to override this.
virtual InlineCacheState GetICState() {
return UNINITIALIZED;
}
// Returns a name for logging/debugging purposes.
virtual const char* GetName() { return MajorName(MajorKey(), false); }

5
deps/v8/src/codegen.cc

@ -369,6 +369,7 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateValueOf, "_ValueOf"},
{&CodeGenerator::GenerateSetValueOf, "_SetValueOf"},
{&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"},
{&CodeGenerator::GenerateCharFromCode, "_CharFromCode"},
{&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
{&CodeGenerator::GenerateLog, "_Log"},
{&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
@ -380,6 +381,10 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateStringCompare, "_StringCompare"},
{&CodeGenerator::GenerateRegExpExec, "_RegExpExec"},
{&CodeGenerator::GenerateNumberToString, "_NumberToString"},
{&CodeGenerator::GenerateMathPow, "_Math_pow"},
{&CodeGenerator::GenerateMathSin, "_Math_sin"},
{&CodeGenerator::GenerateMathCos, "_Math_cos"},
{&CodeGenerator::GenerateMathSqrt, "_Math_sqrt"},
};

70
deps/v8/src/compilation-cache.cc

@ -32,28 +32,23 @@
namespace v8 {
namespace internal {
// The number of sub caches covering the different types to cache.
static const int kSubCacheCount = 4;
// The number of generations for each sub cache.
#if defined(ANDROID)
static const int kScriptGenerations = 1;
static const int kEvalGlobalGenerations = 1;
static const int kEvalContextualGenerations = 1;
static const int kRegExpGenerations = 1;
#else
// The number of ScriptGenerations is carefully chosen based on histograms.
// See issue 458: http://code.google.com/p/v8/issues/detail?id=458
static const int kScriptGenerations = 5;
static const int kEvalGlobalGenerations = 2;
static const int kEvalContextualGenerations = 2;
static const int kRegExpGenerations = 2;
#endif
// Initial size of each compilation cache table allocated.
static const int kInitialCacheSize = 64;
// Index for the first generation in the cache.
static const int kFirstGeneration = 0;
// The compilation cache consists of several generational sub-caches which uses
// this class as a base class. A sub-cache contains a compilation cache tables
// for each generation of the sub-cache. Since the same source code string has
@ -70,6 +65,15 @@ class CompilationSubCache {
// Get the compilation cache tables for a specific generation.
Handle<CompilationCacheTable> GetTable(int generation);
// Accessors for first generation.
Handle<CompilationCacheTable> GetFirstTable() {
return GetTable(kFirstGeneration);
}
void SetFirstTable(Handle<CompilationCacheTable> value) {
ASSERT(kFirstGeneration < generations_);
tables_[kFirstGeneration] = *value;
}
// Age the sub-cache by evicting the oldest generation and creating a new
// young generation.
void Age();
@ -104,6 +108,10 @@ class CompilationCacheScript : public CompilationSubCache {
void Put(Handle<String> source, Handle<JSFunction> boilerplate);
private:
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(Handle<String> source,
Handle<JSFunction> boilerplate);
bool HasOrigin(Handle<JSFunction> boilerplate,
Handle<Object> name,
int line_offset,
@ -125,6 +133,12 @@ class CompilationCacheEval: public CompilationSubCache {
Handle<Context> context,
Handle<JSFunction> boilerplate);
private:
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(Handle<String> source,
Handle<Context> context,
Handle<JSFunction> boilerplate);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
};
@ -140,6 +154,11 @@ class CompilationCacheRegExp: public CompilationSubCache {
void Put(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data);
private:
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
};
@ -287,12 +306,19 @@ Handle<JSFunction> CompilationCacheScript::Lookup(Handle<String> source,
}
Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
Handle<String> source,
Handle<JSFunction> boilerplate) {
CALL_HEAP_FUNCTION(GetFirstTable()->Put(*source, *boilerplate),
CompilationCacheTable);
}
void CompilationCacheScript::Put(Handle<String> source,
Handle<JSFunction> boilerplate) {
HandleScope scope;
ASSERT(boilerplate->IsBoilerplate());
Handle<CompilationCacheTable> table = GetTable(0);
CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate));
SetFirstTable(TablePut(source, boilerplate));
}
@ -326,13 +352,21 @@ Handle<JSFunction> CompilationCacheEval::Lookup(Handle<String> source,
}
Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
Handle<String> source,
Handle<Context> context,
Handle<JSFunction> boilerplate) {
CALL_HEAP_FUNCTION(GetFirstTable()->PutEval(*source, *context, *boilerplate),
CompilationCacheTable);
}
void CompilationCacheEval::Put(Handle<String> source,
Handle<Context> context,
Handle<JSFunction> boilerplate) {
HandleScope scope;
ASSERT(boilerplate->IsBoilerplate());
Handle<CompilationCacheTable> table = GetTable(0);
CALL_HEAP_FUNCTION_VOID(table->PutEval(*source, *context, *boilerplate));
SetFirstTable(TablePut(source, context, boilerplate));
}
@ -366,12 +400,20 @@ Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
}
Handle<CompilationCacheTable> CompilationCacheRegExp::TablePut(
Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
CALL_HEAP_FUNCTION(GetFirstTable()->PutRegExp(*source, flags, *data),
CompilationCacheTable);
}
void CompilationCacheRegExp::Put(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
HandleScope scope;
Handle<CompilationCacheTable> table = GetTable(0);
CALL_HEAP_FUNCTION_VOID(table->PutRegExp(*source, flags, *data));
SetFirstTable(TablePut(source, flags, data));
}

38
deps/v8/src/compiler.cc

@ -31,14 +31,14 @@
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "compiler.h"
#include "data-flow.h"
#include "debug.h"
#include "fast-codegen.h"
#include "full-codegen.h"
#include "liveedit.h"
#include "oprofile-agent.h"
#include "rewriter.h"
#include "scopes.h"
#include "usage-analyzer.h"
#include "liveedit.h"
namespace v8 {
namespace internal {
@ -48,7 +48,7 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
FunctionLiteral* function = info->function();
ASSERT(function != NULL);
// Rewrite the AST by introducing .result assignments where needed.
if (!Rewriter::Process(function) || !AnalyzeVariableUsage(function)) {
if (!Rewriter::Process(function)) {
// Signal a stack overflow by returning a null handle. The stack
// overflow exception will be thrown by the caller.
return Handle<Code>::null();
@ -79,6 +79,17 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
return Handle<Code>::null();
}
if (FLAG_use_flow_graph) {
FlowGraphBuilder builder;
builder.Build(function);
#ifdef DEBUG
if (FLAG_print_graph_text) {
builder.graph()->PrintText(builder.postorder());
}
#endif
}
// Generate code and return it. Code generator selection is governed by
// which backends are enabled and whether the function is considered
// run-once code or not:
@ -117,6 +128,14 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
}
#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<Code> MakeCodeForLiveEdit(CompilationInfo* info) {
Handle<Context> context = Handle<Context>::null();
return MakeCode(context, info);
}
#endif
static Handle<JSFunction> MakeFunction(bool is_global,
bool is_eval,
Compiler::ValidationState validate,
@ -224,7 +243,7 @@ static Handle<JSFunction> MakeFunction(bool is_global,
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
Debugger::OnAfterCompile(script, fun);
Debugger::OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS);
#endif
return fun;
@ -444,6 +463,17 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
return Handle<JSFunction>::null();
}
if (FLAG_use_flow_graph) {
FlowGraphBuilder builder;
builder.Build(literal);
#ifdef DEBUG
if (FLAG_print_graph_text) {
builder.graph()->PrintText(builder.postorder());
}
#endif
}
// Generate code and return it. The way that the compilation mode
// is controlled by the command-line flags is described in
// the static helper function MakeCode.

7
deps/v8/src/compiler.h

@ -276,6 +276,13 @@ class Compiler : public AllStatic {
};
#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<Code> MakeCodeForLiveEdit(CompilationInfo* info);
#endif
// During compilation we need a global list of handles to constants
// for frame elements. When the zone gets deleted, we make sure to
// clear this list of handles as well.

2
deps/v8/src/contexts.h

@ -95,6 +95,7 @@ enum ContextLookupFlags {
call_as_constructor_delegate) \
V(EMPTY_SCRIPT_INDEX, Script, empty_script) \
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
V(MAP_CACHE_INDEX, Object, map_cache) \
@ -216,6 +217,7 @@ class Context: public FixedArray {
CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,
EMPTY_SCRIPT_INDEX,
SCRIPT_FUNCTION_INDEX,
OPAQUE_REFERENCE_FUNCTION_INDEX,
CONTEXT_EXTENSION_FUNCTION_INDEX,
OUT_OF_MEMORY_INDEX,
MAP_CACHE_INDEX,

26
deps/v8/src/conversions-inl.h

@ -59,6 +59,32 @@ static inline int FastD2I(double x) {
}
// The fast double-to-unsigned-int conversion routine does not guarantee
// rounding towards zero.
static inline unsigned int FastD2UI(double x) {
// There is no unsigned version of lrint, so there is no fast path
// in this function as there is in FastD2I. Using lrint doesn't work
// for values of 2^31 and above.
// Convert "small enough" doubles to uint32_t by fixing the 32
// least significant non-fractional bits in the low 32 bits of the
// double, and reading them from there.
const double k2Pow52 = 4503599627370496.0;
bool negative = x < 0;
if (negative) {
x = -x;
}
if (x < k2Pow52) {
x += k2Pow52;
uint32_t result;
memcpy(&result, &x, sizeof(result)); // Copy low 32 bits.
return negative ? ~result + 1 : result;
}
// Large number (outside uint32 range), Infinity or NaN.
return 0x80000000u; // Return integer indefinite.
}
static inline double DoubleToInteger(double x) {
if (isnan(x)) return 0;
if (!isfinite(x) || x == 0) return x;

3
deps/v8/src/conversions.h

@ -32,11 +32,12 @@ namespace v8 {
namespace internal {
// The fast double-to-int conversion routine does not guarantee
// The fast double-to-(unsigned-)int conversion routine does not guarantee
// rounding towards zero.
// The result is unspecified if x is infinite or NaN, or if the rounded
// integer value is outside the range of type int.
static inline int FastD2I(double x);
static inline unsigned int FastD2UI(double x);
static inline double FastI2D(int x) {

906
deps/v8/src/data-flow.cc

@ -33,6 +33,540 @@ namespace v8 {
namespace internal {
void FlowGraph::AppendInstruction(AstNode* instruction) {
ASSERT(instruction != NULL);
if (is_empty() || !exit()->IsBlockNode()) {
AppendNode(new BlockNode());
}
BlockNode::cast(exit())->AddInstruction(instruction);
}
void FlowGraph::AppendNode(Node* node) {
ASSERT(node != NULL);
if (is_empty()) {
entry_ = exit_ = node;
} else {
exit()->AddSuccessor(node);
node->AddPredecessor(exit());
exit_ = node;
}
}
void FlowGraph::AppendGraph(FlowGraph* graph) {
ASSERT(!graph->is_empty());
if (is_empty()) {
entry_ = graph->entry();
exit_ = graph->exit();
} else {
exit()->AddSuccessor(graph->entry());
graph->entry()->AddPredecessor(exit());
exit_ = graph->exit();
}
}
void FlowGraph::Split(BranchNode* branch,
FlowGraph* left,
FlowGraph* right,
JoinNode* merge) {
// Graphs are in edge split form. Add empty blocks if necessary.
if (left->is_empty()) left->AppendNode(new BlockNode());
if (right->is_empty()) right->AppendNode(new BlockNode());
// Add the branch, left flowgraph and merge.
AppendNode(branch);
AppendGraph(left);
AppendNode(merge);
// Splice in the right flowgraph.
right->AppendNode(merge);
branch->AddSuccessor(right->entry());
right->entry()->AddPredecessor(branch);
}
void FlowGraph::Loop(JoinNode* merge,
FlowGraph* condition,
BranchNode* branch,
FlowGraph* body) {
// Add the merge, condition and branch. Add merge's predecessors in
// left-to-right order.
AppendNode(merge);
body->AppendNode(merge);
AppendGraph(condition);
AppendNode(branch);
// Splice in the body flowgraph.
branch->AddSuccessor(body->entry());
body->entry()->AddPredecessor(branch);
}
void EntryNode::Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) {
ASSERT(successor_ != NULL);
preorder->Add(this);
if (!successor_->IsMarkedWith(mark)) {
successor_->MarkWith(mark);
successor_->Traverse(mark, preorder, postorder);
}
postorder->Add(this);
}
void ExitNode::Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) {
preorder->Add(this);
postorder->Add(this);
}
void BlockNode::Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) {
ASSERT(successor_ != NULL);
preorder->Add(this);
if (!successor_->IsMarkedWith(mark)) {
successor_->MarkWith(mark);
successor_->Traverse(mark, preorder, postorder);
}
postorder->Add(this);
}
void BranchNode::Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) {
ASSERT(successor0_ != NULL && successor1_ != NULL);
preorder->Add(this);
if (!successor0_->IsMarkedWith(mark)) {
successor0_->MarkWith(mark);
successor0_->Traverse(mark, preorder, postorder);
}
if (!successor1_->IsMarkedWith(mark)) {
successor1_->MarkWith(mark);
successor1_->Traverse(mark, preorder, postorder);
}
postorder->Add(this);
}
void JoinNode::Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) {
ASSERT(successor_ != NULL);
preorder->Add(this);
if (!successor_->IsMarkedWith(mark)) {
successor_->MarkWith(mark);
successor_->Traverse(mark, preorder, postorder);
}
postorder->Add(this);
}
void FlowGraphBuilder::Build(FunctionLiteral* lit) {
graph_ = FlowGraph::Empty();
graph_.AppendNode(new EntryNode());
global_exit_ = new ExitNode();
VisitStatements(lit->body());
if (HasStackOverflow()) {
graph_ = FlowGraph::Empty();
return;
}
graph_.AppendNode(global_exit_);
// Build preorder and postorder traversal orders. All the nodes in
// the graph have the same mark flag. For the traversal, use that
// flag's negation. Traversal will flip all the flags.
bool mark = graph_.entry()->IsMarkedWith(false);
graph_.entry()->MarkWith(mark);
graph_.entry()->Traverse(mark, &preorder_, &postorder_);
}
void FlowGraphBuilder::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
}
void FlowGraphBuilder::VisitBlock(Block* stmt) {
VisitStatements(stmt->statements());
}
void FlowGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
Visit(stmt->expression());
}
void FlowGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
// Nothing to do.
}
void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) {
Visit(stmt->condition());
BranchNode* branch = new BranchNode();
FlowGraph original = graph_;
graph_ = FlowGraph::Empty();
Visit(stmt->then_statement());
FlowGraph left = graph_;
graph_ = FlowGraph::Empty();
Visit(stmt->else_statement());
JoinNode* join = new JoinNode();
original.Split(branch, &left, &graph_, join);
graph_ = original;
}
// Continue and break introduce non-local control flow that this builder
// does not model; abort graph construction via the bailout flag.
void FlowGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
  SetStackOverflow();
}

void FlowGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
  SetStackOverflow();
}

// A return evaluates its expression, records the return itself, and
// then flows to the function's single exit node.
void FlowGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
  Visit(stmt->expression());
  graph_.AppendInstruction(stmt);
  graph_.AppendNode(global_exit());
}

void FlowGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
  Visit(stmt->expression());
  graph_.AppendInstruction(stmt);
}

void FlowGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
  graph_.AppendInstruction(stmt);
}

// Switch statements are not modeled; bail out.
void FlowGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
  SetStackOverflow();
}
// do { body } while (cond);
// Layout: join -> body -> condition -> branch, with the branch's first
// (true) successor wired back to the join as the loop back edge.  The
// branch's second (false) successor is left free as the loop exit.
void FlowGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
  JoinNode* join = new JoinNode();
  FlowGraph original = graph_;
  graph_ = FlowGraph::Empty();
  Visit(stmt->body());
  FlowGraph body = graph_;
  graph_ = FlowGraph::Empty();
  Visit(stmt->cond());
  BranchNode* branch = new BranchNode();
  // Add body, condition and branch.
  original.AppendNode(join);
  original.AppendGraph(&body);
  original.AppendGraph(&graph_);  // The condition.
  original.AppendNode(branch);
  // Tie the knot: the back edge from the branch to the loop header.
  branch->AddSuccessor(join);
  join->AddPredecessor(branch);
  graph_ = original;
}
// while (cond) { body }
// The condition and body are built in fresh graphs and assembled into a
// forward loop by FlowGraph::Loop().
void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
  JoinNode* join = new JoinNode();
  FlowGraph saved = graph_;
  graph_ = FlowGraph::Empty();
  Visit(stmt->cond());
  BranchNode* branch = new BranchNode();
  FlowGraph test_graph = graph_;
  graph_ = FlowGraph::Empty();
  Visit(stmt->body());
  saved.Loop(join, &test_graph, branch, &graph_);
  graph_ = saved;
}
// for (init; cond; next) { body }
// The init runs once before the loop; the next clause is appended to
// the body graph so it executes on every iteration before the back edge.
void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
  if (stmt->init() != NULL) Visit(stmt->init());
  JoinNode* join = new JoinNode();
  FlowGraph original = graph_;
  graph_ = FlowGraph::Empty();
  if (stmt->cond() != NULL) Visit(stmt->cond());
  BranchNode* branch = new BranchNode();
  FlowGraph condition = graph_;
  graph_ = FlowGraph::Empty();
  Visit(stmt->body());
  if (stmt->next() != NULL) Visit(stmt->next());
  original.Loop(join, &condition, branch, &graph_);
  graph_ = original;
}
// for (x in enumerable) { body }
// Modeled as a loop with an empty condition graph: the enumerable is
// evaluated once, then the body repeats via the join/branch pair.
void FlowGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
  Visit(stmt->enumerable());
  JoinNode* join = new JoinNode();
  FlowGraph empty;
  BranchNode* branch = new BranchNode();
  FlowGraph original = graph_;
  graph_ = FlowGraph::Empty();
  Visit(stmt->body());
  original.Loop(join, &empty, branch, &graph_);
  graph_ = original;
}

// Exception-handling constructs are not modeled; bail out.
void FlowGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
  SetStackOverflow();
}

void FlowGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
  SetStackOverflow();
}
void FlowGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
  graph_.AppendInstruction(stmt);
}

// Function literals are treated as opaque instructions here; their
// bodies get their own flow graphs when (and if) they are compiled.
void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
  graph_.AppendInstruction(expr);
}

void FlowGraphBuilder::VisitFunctionBoilerplateLiteral(
    FunctionBoilerplateLiteral* expr) {
  graph_.AppendInstruction(expr);
}
// cond ? then_expression : else_expression
// Same shape as an if-statement: both arms are built in fresh graphs
// and joined back with Split().
void FlowGraphBuilder::VisitConditional(Conditional* expr) {
  Visit(expr->condition());
  BranchNode* branch = new BranchNode();
  FlowGraph saved = graph_;
  graph_ = FlowGraph::Empty();
  Visit(expr->then_expression());
  FlowGraph then_graph = graph_;
  graph_ = FlowGraph::Empty();
  Visit(expr->else_expression());
  JoinNode* join = new JoinNode();
  saved.Split(branch, &then_graph, &graph_, join);
  graph_ = saved;
}
// Slots are introduced by later rewriting passes and never occur in the
// AST visited here.
void FlowGraphBuilder::VisitSlot(Slot* expr) {
  UNREACHABLE();
}

void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
  graph_.AppendInstruction(expr);
}

void FlowGraphBuilder::VisitLiteral(Literal* expr) {
  graph_.AppendInstruction(expr);
}

void FlowGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
  graph_.AppendInstruction(expr);
}

// Evaluate all property values left-to-right, then the literal itself.
void FlowGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
  ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
  for (int i = 0, len = properties->length(); i < len; i++) {
    Visit(properties->at(i)->value());
  }
  graph_.AppendInstruction(expr);
}

// Evaluate all element expressions left-to-right, then the literal.
void FlowGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
  ZoneList<Expression*>* values = expr->values();
  for (int i = 0, len = values->length(); i < len; i++) {
    Visit(values->at(i));
  }
  graph_.AppendInstruction(expr);
}

void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
  graph_.AppendInstruction(expr);
}
// Record an assignment.  Assignments to stack-allocated variables are
// additionally collected in definitions_ for reaching-definitions
// analysis.
void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
  Property* prop = expr->target()->AsProperty();
  // Left-hand side can be a variable or property (or reference error) but
  // not both.
  ASSERT(var == NULL || prop == NULL);
  if (var != NULL) {
    Visit(expr->value());
    // Only stack-allocated targets are tracked as definitions.
    if (var->IsStackAllocated()) definitions_.Add(expr);
  } else if (prop != NULL) {
    Visit(prop->obj());
    // Named keys are constants and are not visited; computed keys are.
    if (!prop->key()->IsPropertyName()) Visit(prop->key());
    Visit(expr->value());
  }
  graph_.AppendInstruction(expr);
}
void FlowGraphBuilder::VisitThrow(Throw* expr) {
  Visit(expr->exception());
  graph_.AppendInstruction(expr);
}

// A property load evaluates the receiver, then a computed key (named
// keys are constants and are not visited), then the load itself.
void FlowGraphBuilder::VisitProperty(Property* expr) {
  Visit(expr->obj());
  if (!expr->key()->IsPropertyName()) Visit(expr->key());
  graph_.AppendInstruction(expr);
}
// A call evaluates the callee expression, then each argument in order,
// and finally records the call itself as an instruction.
void FlowGraphBuilder::VisitCall(Call* expr) {
  Visit(expr->expression());
  ZoneList<Expression*>* args = expr->arguments();
  for (int i = 0; i < args->length(); i++) {
    Visit(args->at(i));
  }
  graph_.AppendInstruction(expr);
}

// Same evaluation order as a plain call: constructor, then arguments.
void FlowGraphBuilder::VisitCallNew(CallNew* expr) {
  Visit(expr->expression());
  ZoneList<Expression*>* args = expr->arguments();
  for (int i = 0; i < args->length(); i++) {
    Visit(args->at(i));
  }
  graph_.AppendInstruction(expr);
}

// Runtime calls have no evaluated callee, only arguments.
void FlowGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
  ZoneList<Expression*>* args = expr->arguments();
  for (int i = 0; i < args->length(); i++) {
    Visit(args->at(i));
  }
  graph_.AppendInstruction(expr);
}
void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
  Visit(expr->expression());
  graph_.AppendInstruction(expr);
}

// Count operations (++/--) on stack-allocated variables are recorded as
// definitions for reaching-definitions analysis, like assignments.
void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) {
  Visit(expr->expression());
  Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
  if (var != NULL && var->IsStackAllocated()) {
    definitions_.Add(expr);
  }
  graph_.AppendInstruction(expr);
}
// Binary operations.  Comma and the short-circuiting operators (|| and
// &&) introduce control flow; all other operators evaluate both
// operands and then append the operation itself.
void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
  Visit(expr->left());
  switch (expr->op()) {
    case Token::COMMA:
      Visit(expr->right());
      break;
    case Token::OR: {
      // a || b: the right operand is only evaluated on the false arm,
      // so it becomes the branch's second (false) successor graph.
      BranchNode* branch = new BranchNode();
      FlowGraph original = graph_;
      graph_ = FlowGraph::Empty();
      Visit(expr->right());
      FlowGraph empty;
      JoinNode* join = new JoinNode();
      original.Split(branch, &empty, &graph_, join);
      graph_ = original;
      break;
    }
    case Token::AND: {
      // a && b: the right operand is only evaluated on the true arm,
      // so it becomes the branch's first (true) successor graph.
      BranchNode* branch = new BranchNode();
      FlowGraph original = graph_;
      graph_ = FlowGraph::Empty();
      Visit(expr->right());
      FlowGraph empty;
      JoinNode* join = new JoinNode();
      original.Split(branch, &graph_, &empty, join);
      graph_ = original;
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
      Visit(expr->right());
      graph_.AppendInstruction(expr);
      break;
    default:
      UNREACHABLE();
  }
}
// Comparisons evaluate both operands left-to-right; no short-circuiting.
void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
  Visit(expr->left());
  Visit(expr->right());
  graph_.AppendInstruction(expr);
}

void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) {
  graph_.AppendInstruction(expr);
}
void AstLabeler::Label(CompilationInfo* info) {
info_ = info;
VisitStatements(info_->function()->body());
@ -204,6 +738,9 @@ void AstLabeler::VisitAssignment(Assignment* expr) {
USE(proxy);
ASSERT(proxy != NULL && proxy->var()->is_this());
info()->set_has_this_properties(true);
prop->obj()->set_num(AstNode::kNoNumber);
prop->key()->set_num(AstNode::kNoNumber);
Visit(expr->value());
expr->set_num(next_number_++);
}
@ -220,6 +757,9 @@ void AstLabeler::VisitProperty(Property* expr) {
USE(proxy);
ASSERT(proxy != NULL && proxy->var()->is_this());
info()->set_has_this_properties(true);
expr->obj()->set_num(AstNode::kNoNumber);
expr->key()->set_num(AstNode::kNoNumber);
expr->set_num(next_number_++);
}
@ -558,4 +1098,370 @@ void LivenessAnalyzer::VisitThisFunction(ThisFunction* expr) {
}
#ifdef DEBUG
// Print a one-line textual representation of an instruction in a flow
// graph.  Using the AstVisitor is overkill because there is no
// recursion here.  It is only used for printing in debug mode.
class TextInstructionPrinter: public AstVisitor {
 public:
  TextInstructionPrinter() {}

 private:
  // AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
  AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT

  DISALLOW_COPY_AND_ASSIGN(TextInstructionPrinter);
};
// Node types that the flow-graph builder never emits as instructions
// (declarations, break/continue, switch, try) are unreachable here.
// Statement nodes that can appear print only their type name;
// expression operands are referenced by their @number.
void TextInstructionPrinter::VisitDeclaration(Declaration* decl) {
  UNREACHABLE();
}

void TextInstructionPrinter::VisitBlock(Block* stmt) {
  PrintF("Block");
}

void TextInstructionPrinter::VisitExpressionStatement(
    ExpressionStatement* stmt) {
  PrintF("ExpressionStatement");
}

void TextInstructionPrinter::VisitEmptyStatement(EmptyStatement* stmt) {
  PrintF("EmptyStatement");
}

void TextInstructionPrinter::VisitIfStatement(IfStatement* stmt) {
  PrintF("IfStatement");
}

void TextInstructionPrinter::VisitContinueStatement(ContinueStatement* stmt) {
  UNREACHABLE();
}

void TextInstructionPrinter::VisitBreakStatement(BreakStatement* stmt) {
  UNREACHABLE();
}

void TextInstructionPrinter::VisitReturnStatement(ReturnStatement* stmt) {
  PrintF("return @%d", stmt->expression()->num());
}

void TextInstructionPrinter::VisitWithEnterStatement(WithEnterStatement* stmt) {
  PrintF("WithEnterStatement");
}

void TextInstructionPrinter::VisitWithExitStatement(WithExitStatement* stmt) {
  PrintF("WithExitStatement");
}

void TextInstructionPrinter::VisitSwitchStatement(SwitchStatement* stmt) {
  UNREACHABLE();
}

void TextInstructionPrinter::VisitDoWhileStatement(DoWhileStatement* stmt) {
  PrintF("DoWhileStatement");
}

void TextInstructionPrinter::VisitWhileStatement(WhileStatement* stmt) {
  PrintF("WhileStatement");
}

void TextInstructionPrinter::VisitForStatement(ForStatement* stmt) {
  PrintF("ForStatement");
}

void TextInstructionPrinter::VisitForInStatement(ForInStatement* stmt) {
  PrintF("ForInStatement");
}

void TextInstructionPrinter::VisitTryCatchStatement(TryCatchStatement* stmt) {
  UNREACHABLE();
}

void TextInstructionPrinter::VisitTryFinallyStatement(
    TryFinallyStatement* stmt) {
  UNREACHABLE();
}

void TextInstructionPrinter::VisitDebuggerStatement(DebuggerStatement* stmt) {
  PrintF("DebuggerStatement");
}

void TextInstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) {
  PrintF("FunctionLiteral");
}

void TextInstructionPrinter::VisitFunctionBoilerplateLiteral(
    FunctionBoilerplateLiteral* expr) {
  PrintF("FunctionBoilerplateLiteral");
}
void TextInstructionPrinter::VisitConditional(Conditional* expr) {
  PrintF("Conditional");
}

void TextInstructionPrinter::VisitSlot(Slot* expr) {
  UNREACHABLE();
}

// Print a variable by name.  A proxy that does not resolve to a
// variable must resolve to a property (per the ASSERT) and is printed
// as a property access instead.
void TextInstructionPrinter::VisitVariableProxy(VariableProxy* expr) {
  Variable* var = expr->AsVariable();
  if (var != NULL) {
    SmartPointer<char> name = var->name()->ToCString();
    PrintF("%s", *name);
  } else {
    ASSERT(expr->AsProperty() != NULL);
    VisitProperty(expr->AsProperty());
  }
}
// Literals print their value; other literal-like nodes print only
// their type name.
void TextInstructionPrinter::VisitLiteral(Literal* expr) {
  expr->handle()->ShortPrint();
}

void TextInstructionPrinter::VisitRegExpLiteral(RegExpLiteral* expr) {
  PrintF("RegExpLiteral");
}

void TextInstructionPrinter::VisitObjectLiteral(ObjectLiteral* expr) {
  PrintF("ObjectLiteral");
}

void TextInstructionPrinter::VisitArrayLiteral(ArrayLiteral* expr) {
  PrintF("ArrayLiteral");
}

void TextInstructionPrinter::VisitCatchExtensionObject(
    CatchExtensionObject* expr) {
  PrintF("CatchExtensionObject");
}
// Print an assignment as "<target> <op> @<value>".  The target is a
// variable name, a named property "@obj.name", or a keyed property
// "@obj[@key]".
void TextInstructionPrinter::VisitAssignment(Assignment* expr) {
  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
  Property* prop = expr->target()->AsProperty();
  if (var != NULL) {
    SmartPointer<char> name = var->name()->ToCString();
    PrintF("%s %s @%d",
           *name,
           Token::String(expr->op()),
           expr->value()->num());
  } else if (prop != NULL) {
    if (prop->key()->IsPropertyName()) {
      PrintF("@%d.", prop->obj()->num());
      ASSERT(prop->key()->AsLiteral() != NULL);
      prop->key()->AsLiteral()->handle()->Print();
      PrintF(" %s @%d",
             Token::String(expr->op()),
             expr->value()->num());
    } else {
      PrintF("@%d[@%d] %s @%d",
             prop->obj()->num(),
             prop->key()->num(),
             Token::String(expr->op()),
             expr->value()->num());
    }
  } else {
    // Throw reference error.
    Visit(expr->target());
  }
}
void TextInstructionPrinter::VisitThrow(Throw* expr) {
  PrintF("throw @%d", expr->exception()->num());
}

// Print a property load as "@obj.name" or "@obj[@key]".
void TextInstructionPrinter::VisitProperty(Property* expr) {
  if (expr->key()->IsPropertyName()) {
    PrintF("@%d.", expr->obj()->num());
    ASSERT(expr->key()->AsLiteral() != NULL);
    expr->key()->AsLiteral()->handle()->Print();
  } else {
    PrintF("@%d[@%d]", expr->obj()->num(), expr->key()->num());
  }
}
// Print calls as "@callee(@a, @b, ...)", constructor calls with a "new"
// prefix, and runtime calls by name.
void TextInstructionPrinter::VisitCall(Call* expr) {
  PrintF("@%d(", expr->expression()->num());
  ZoneList<Expression*>* arguments = expr->arguments();
  for (int i = 0, len = arguments->length(); i < len; i++) {
    if (i != 0) PrintF(", ");  // Separator before all but the first.
    PrintF("@%d", arguments->at(i)->num());
  }
  PrintF(")");
}

void TextInstructionPrinter::VisitCallNew(CallNew* expr) {
  PrintF("new @%d(", expr->expression()->num());
  ZoneList<Expression*>* arguments = expr->arguments();
  for (int i = 0, len = arguments->length(); i < len; i++) {
    if (i != 0) PrintF(", ");
    PrintF("@%d", arguments->at(i)->num());
  }
  PrintF(")");
}

void TextInstructionPrinter::VisitCallRuntime(CallRuntime* expr) {
  SmartPointer<char> name = expr->name()->ToCString();
  PrintF("%s(", *name);
  ZoneList<Expression*>* arguments = expr->arguments();
  for (int i = 0, len = arguments->length(); i < len; i++) {
    if (i != 0) PrintF(", ");
    PrintF("@%d", arguments->at(i)->num());
  }
  PrintF(")");
}
void TextInstructionPrinter::VisitUnaryOperation(UnaryOperation* expr) {
  PrintF("%s(@%d)", Token::String(expr->op()), expr->expression()->num());
}

// Prefix and postfix count operations print the operator on the
// matching side of the operand.
void TextInstructionPrinter::VisitCountOperation(CountOperation* expr) {
  if (expr->is_prefix()) {
    PrintF("%s@%d", Token::String(expr->op()), expr->expression()->num());
  } else {
    PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op()));
  }
}

// Comma and the short-circuit operators never reach the printer: the
// flow-graph builder turns them into control flow, not instructions.
void TextInstructionPrinter::VisitBinaryOperation(BinaryOperation* expr) {
  ASSERT(expr->op() != Token::COMMA);
  ASSERT(expr->op() != Token::OR);
  ASSERT(expr->op() != Token::AND);
  PrintF("@%d %s @%d",
         expr->left()->num(),
         Token::String(expr->op()),
         expr->right()->num());
}

void TextInstructionPrinter::VisitCompareOperation(CompareOperation* expr) {
  PrintF("@%d %s @%d",
         expr->left()->num(),
         Token::String(expr->op()),
         expr->right()->num());
}

void TextInstructionPrinter::VisitThisFunction(ThisFunction* expr) {
  PrintF("ThisFunction");
}
// File-local counters used while numbering nodes and instructions for
// printing.  Reset by FlowGraph::PrintText before each dump.
static int node_count = 0;
static int instruction_count = 0;

// Default numbering: the node takes the next node number and carries no
// instructions.
void Node::AssignNumbers() {
  set_number(node_count++);
}
// Number the block itself, then its instructions in execution order.
void BlockNode::AssignNumbers() {
  set_number(node_count++);
  for (int i = 0, len = instructions_.length(); i < len; i++) {
    instructions_[i]->set_num(instruction_count++);
  }
}
// Each node prints itself as a labeled basic block ("L<n>: <kind>")
// followed by its outgoing edges.
void EntryNode::PrintText() {
  PrintF("L%d: Entry\n", number());
  PrintF("goto L%d\n\n", successor_->number());
}

void ExitNode::PrintText() {
  PrintF("L%d: Exit\n\n", number());
}

void BlockNode::PrintText() {
  // Print the instructions in the block, one per line, each prefixed
  // with its instruction number.
  PrintF("L%d: Block\n", number());
  TextInstructionPrinter printer;
  for (int i = 0, len = instructions_.length(); i < len; i++) {
    PrintF("%d ", instructions_[i]->num());
    printer.Visit(instructions_[i]);
    PrintF("\n");
  }
  PrintF("goto L%d\n\n", successor_->number());
}

void BranchNode::PrintText() {
  PrintF("L%d: Branch\n", number());
  PrintF("goto (L%d, L%d)\n\n", successor0_->number(), successor1_->number());
}

void JoinNode::PrintText() {
  // List the join's predecessors before the single outgoing edge.
  PrintF("L%d: Join(", number());
  for (int i = 0, len = predecessors_.length(); i < len; i++) {
    if (i != 0) PrintF(", ");
    PrintF("L%d", predecessors_[i]->number());
  }
  PrintF(")\ngoto L%d\n\n", successor_->number());
}
// Dump the whole graph: number everything, then print each basic block.
// Reverse postorder approximates execution order.
void FlowGraph::PrintText(ZoneList<Node*>* postorder) {
  PrintF("\n========\n");

  // Number nodes and instructions in reverse postorder.
  node_count = 0;
  instruction_count = 0;
  for (int i = postorder->length() - 1; i >= 0; i--) {
    postorder->at(i)->AssignNumbers();
  }

  // Print basic blocks in reverse postorder.
  for (int i = postorder->length() - 1; i >= 0; i--) {
    postorder->at(i)->PrintText();
  }
}
#endif // defined(DEBUG)
} } // namespace v8::internal

391
deps/v8/src/data-flow.h

@ -28,12 +28,403 @@
#ifndef V8_DATAFLOW_H_
#define V8_DATAFLOW_H_
#include "v8.h"
#include "ast.h"
#include "compiler.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {
// A fixed-length bit set backed by zone-allocated 32-bit words.  Used
// to represent sets (e.g. of definitions) in data-flow analyses.  All
// storage comes from the current Zone, so instances must not outlive it.
class BitVector: public ZoneObject {
 public:
  // Create a vector of |length| bits, all initially cleared.  The
  // length must be positive.
  explicit BitVector(int length)
      : length_(length),
        data_length_(SizeFor(length)),
        data_(Zone::NewArray<uint32_t>(data_length_)) {
    ASSERT(length > 0);
    Clear();
  }

  BitVector(const BitVector& other)
      : length_(other.length()),
        data_length_(SizeFor(length_)),
        data_(Zone::NewArray<uint32_t>(data_length_)) {
    CopyFrom(other);
  }

  // Number of 32-bit words needed to hold |length| bits.
  static int SizeFor(int length) {
    return 1 + ((length - 1) / 32);
  }

  BitVector& operator=(const BitVector& rhs) {
    if (this != &rhs) CopyFrom(rhs);
    return *this;
  }

  // Copy the contents of an equal-length vector into this one.
  void CopyFrom(const BitVector& other) {
    ASSERT(other.length() == length());
    for (int i = 0; i < data_length_; i++) {
      data_[i] = other.data_[i];
    }
  }

  // Test whether bit |i| is set.  Const-qualified so it can be called
  // through const references and pointers.
  bool Contains(int i) const {
    ASSERT(i >= 0 && i < length());
    uint32_t block = data_[i / 32];
    return (block & (1U << (i % 32))) != 0;
  }

  void Add(int i) {
    ASSERT(i >= 0 && i < length());
    data_[i / 32] |= (1U << (i % 32));
  }

  void Remove(int i) {
    ASSERT(i >= 0 && i < length());
    data_[i / 32] &= ~(1U << (i % 32));
  }

  // Destructive set union: this |= other.  Lengths must match.
  void Union(const BitVector& other) {
    ASSERT(other.length() == length());
    for (int i = 0; i < data_length_; i++) {
      data_[i] |= other.data_[i];
    }
  }

  // Destructive set intersection: this &= other.  Lengths must match.
  void Intersect(const BitVector& other) {
    ASSERT(other.length() == length());
    for (int i = 0; i < data_length_; i++) {
      data_[i] = data_[i] & other.data_[i];
    }
  }

  void Clear() {
    for (int i = 0; i < data_length_; i++) {
      data_[i] = 0;
    }
  }

  bool IsEmpty() const {
    for (int i = 0; i < data_length_; i++) {
      if (data_[i] != 0) return false;
    }
    return true;
  }

  int length() const { return length_; }

 private:
  int length_;       // Number of usable bits.
  int data_length_;  // Number of uint32_t words in data_.
  uint32_t* data_;   // Zone-allocated backing store.
};
// Forward declarations of Node types.
class Node;
class BranchNode;
class JoinNode;
// Flow graphs have a single entry and single exit.  The empty flow
// graph is represented by both entry and exit being NULL.  Instances
// are freely copied by value during construction (BASE_EMBEDDED; the
// members are just two node pointers).
class FlowGraph BASE_EMBEDDED {
 public:
  FlowGraph() : entry_(NULL), exit_(NULL) {}

  static FlowGraph Empty() { return FlowGraph(); }

  bool is_empty() const { return entry_ == NULL; }
  Node* entry() const { return entry_; }
  Node* exit() const { return exit_; }

  // Add a single instruction to the end of this flow graph.
  void AppendInstruction(AstNode* instruction);

  // Add a single node to the end of this flow graph.
  void AppendNode(Node* node);

  // Add a flow graph fragment to the end of this one.
  void AppendGraph(FlowGraph* graph);

  // Concatenate an if-then-else flow-graph to this one.  Control is split
  // and merged, so the graph remains single-entry, single-exit.
  void Split(BranchNode* branch,
             FlowGraph* left,
             FlowGraph* right,
             JoinNode* merge);

  // Concatenate a forward loop (e.g., while or for loop) flow-graph to this
  // one.  Control is split by the condition and merged back from the back
  // edge at end of the body to the beginning of the condition.  The single
  // (free) exit of the result graph is the right (false) arm of the branch
  // node.
  void Loop(JoinNode* merge,
            FlowGraph* condition,
            BranchNode* branch,
            FlowGraph* body);

#ifdef DEBUG
  void PrintText(ZoneList<Node*>* postorder);
#endif

 private:
  Node* entry_;
  Node* exit_;
};
// Abstract base class for flow-graph nodes.  Zone-allocated; nodes are
// never individually freed.
class Node: public ZoneObject {
 public:
  Node() : number_(-1), mark_(false) {}

  virtual ~Node() {}

  // Cheap type tests used instead of RTTI (see the cast() helpers in
  // the subclasses).
  virtual bool IsBlockNode() { return false; }
  virtual bool IsJoinNode() { return false; }

  virtual void AddPredecessor(Node* predecessor) = 0;
  virtual void AddSuccessor(Node* successor) = 0;

  // Visited-marking for traversal: a node counts as visited when its
  // mark equals the traversal's mark.  A full traversal flips every
  // node's mark, so the next traversal can use the negated flag.
  bool IsMarkedWith(bool mark) { return mark_ == mark; }
  void MarkWith(bool mark) { mark_ = mark; }

  // Perform a depth first search and record preorder and postorder
  // traversal orders.
  virtual void Traverse(bool mark,
                        ZoneList<Node*>* preorder,
                        ZoneList<Node*>* postorder) = 0;

  // Printing number, assigned by AssignNumbers(); -1 until then.
  int number() { return number_; }
  void set_number(int number) { number_ = number; }

#ifdef DEBUG
  virtual void AssignNumbers();
  virtual void PrintText() = 0;
#endif

 private:
  int number_;
  bool mark_;

  DISALLOW_COPY_AND_ASSIGN(Node);
};
// An entry node has no predecessors and a single successor.
class EntryNode: public Node {
 public:
  EntryNode() : successor_(NULL) {}

  void AddPredecessor(Node* predecessor) { UNREACHABLE(); }

  // The successor can be set exactly once.
  void AddSuccessor(Node* successor) {
    ASSERT(successor_ == NULL && successor != NULL);
    successor_ = successor;
  }

  void Traverse(bool mark,
                ZoneList<Node*>* preorder,
                ZoneList<Node*>* postorder);

#ifdef DEBUG
  void PrintText();
#endif

 private:
  Node* successor_;

  DISALLOW_COPY_AND_ASSIGN(EntryNode);
};
// An exit node has arbitrarily many predecessors and no successors.
class ExitNode: public Node {
 public:
  ExitNode() : predecessors_(4) {}

  void AddPredecessor(Node* predecessor) {
    ASSERT(predecessor != NULL);
    predecessors_.Add(predecessor);
  }

  // Exits have no successors; adding one is silently ignored so that
  // generic graph-stitching code can treat all nodes uniformly.
  void AddSuccessor(Node* successor) { /* Do nothing. */ }

  void Traverse(bool mark,
                ZoneList<Node*>* preorder,
                ZoneList<Node*>* postorder);

#ifdef DEBUG
  void PrintText();
#endif

 private:
  ZoneList<Node*> predecessors_;

  DISALLOW_COPY_AND_ASSIGN(ExitNode);
};
// Block nodes have a single successor and predecessor and a list of
// instructions (AST nodes in evaluation order).
class BlockNode: public Node {
 public:
  BlockNode() : predecessor_(NULL), successor_(NULL), instructions_(4) {}

  // Checked downcast (cheap RTTI via IsBlockNode).
  static BlockNode* cast(Node* node) {
    ASSERT(node->IsBlockNode());
    return reinterpret_cast<BlockNode*>(node);
  }

  bool IsBlockNode() { return true; }

  // Predecessor and successor can each be set exactly once.
  void AddPredecessor(Node* predecessor) {
    ASSERT(predecessor_ == NULL && predecessor != NULL);
    predecessor_ = predecessor;
  }

  void AddSuccessor(Node* successor) {
    ASSERT(successor_ == NULL && successor != NULL);
    successor_ = successor;
  }

  void AddInstruction(AstNode* instruction) {
    instructions_.Add(instruction);
  }

  void Traverse(bool mark,
                ZoneList<Node*>* preorder,
                ZoneList<Node*>* postorder);

#ifdef DEBUG
  void AssignNumbers();
  void PrintText();
#endif

 private:
  Node* predecessor_;
  Node* successor_;
  ZoneList<AstNode*> instructions_;

  DISALLOW_COPY_AND_ASSIGN(BlockNode);
};
// Branch nodes have a single predecessor and a pair of successors.
// By convention successor0 is the true (then) arm and successor1 the
// false (else) arm.
class BranchNode: public Node {
 public:
  BranchNode() : predecessor_(NULL), successor0_(NULL), successor1_(NULL) {}

  void AddPredecessor(Node* predecessor) {
    ASSERT(predecessor_ == NULL && predecessor != NULL);
    predecessor_ = predecessor;
  }

  // Successors are filled in order: the first call sets successor0,
  // the second sets successor1; a third call is an error.
  void AddSuccessor(Node* successor) {
    ASSERT(successor1_ == NULL && successor != NULL);
    if (successor0_ == NULL) {
      successor0_ = successor;
    } else {
      successor1_ = successor;
    }
  }

  void Traverse(bool mark,
                ZoneList<Node*>* preorder,
                ZoneList<Node*>* postorder);

#ifdef DEBUG
  void PrintText();
#endif

 private:
  Node* predecessor_;
  Node* successor0_;
  Node* successor1_;

  DISALLOW_COPY_AND_ASSIGN(BranchNode);
};
// Join nodes have arbitrarily many predecessors and a single successor.
class JoinNode: public Node {
 public:
  JoinNode() : predecessors_(2), successor_(NULL) {}

  // Checked downcast (cheap RTTI via IsJoinNode).
  static JoinNode* cast(Node* node) {
    ASSERT(node->IsJoinNode());
    return reinterpret_cast<JoinNode*>(node);
  }

  bool IsJoinNode() { return true; }

  void AddPredecessor(Node* predecessor) {
    ASSERT(predecessor != NULL);
    predecessors_.Add(predecessor);
  }

  // The successor can be set exactly once.
  void AddSuccessor(Node* successor) {
    ASSERT(successor_ == NULL && successor != NULL);
    successor_ = successor;
  }

  void Traverse(bool mark,
                ZoneList<Node*>* preorder,
                ZoneList<Node*>* postorder);

#ifdef DEBUG
  void PrintText();
#endif

 private:
  ZoneList<Node*> predecessors_;
  Node* successor_;

  DISALLOW_COPY_AND_ASSIGN(JoinNode);
};
// Construct a flow graph from a function literal.  Build pre- and
// postorder traversal orders as a byproduct.  On unsupported constructs
// (break/continue, switch, try) the builder bails out via the
// AstVisitor stack-overflow flag and produces an empty graph.
class FlowGraphBuilder: public AstVisitor {
 public:
  FlowGraphBuilder()
      : global_exit_(NULL),
        preorder_(4),
        postorder_(4),
        definitions_(4) {
  }

  void Build(FunctionLiteral* lit);

  FlowGraph* graph() { return &graph_; }

  ZoneList<Node*>* postorder() { return &postorder_; }

 private:
  ExitNode* global_exit() { return global_exit_; }

  // AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
  AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT

  FlowGraph graph_;
  ExitNode* global_exit_;
  ZoneList<Node*> preorder_;
  ZoneList<Node*> postorder_;

  // The flow graph builder collects a list of definitions (assignments and
  // count operations) to stack-allocated variables to use for reaching
  // definitions analysis.
  ZoneList<AstNode*> definitions_;

  DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
};
// This class is used to number all expressions in the AST according to
// their evaluation order (post-order left-to-right traversal).
class AstLabeler: public AstVisitor {

29
deps/v8/src/date-delay.js

@ -113,8 +113,11 @@ function EquivalentTime(t) {
// we must do this, but for compatibility with other browsers, we use
// the actual year if it is in the range 1970..2037
if (t >= 0 && t <= 2.1e12) return t;
var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)), MONTH_FROM_TIME(t), DATE_FROM_TIME(t));
return TimeClip(MakeDate(day, TimeWithinDay(t)));
var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)),
MONTH_FROM_TIME(t),
DATE_FROM_TIME(t));
return MakeDate(day, TimeWithinDay(t));
}
@ -257,14 +260,6 @@ function TimeInYear(year) {
}
// Compute modified Julian day from year, month, date.
function ToJulianDay(year, month, date) {
var jy = (month > 1) ? year : year - 1;
var jm = (month > 1) ? month + 2 : month + 14;
var ja = FLOOR(jy / 100);
return FLOOR(FLOOR(365.25*jy) + FLOOR(30.6001*jm) + date + 1720995) + 2 - ja + FLOOR(0.25*ja);
}
var four_year_cycle_table = CalculateDateTable();
@ -359,20 +354,18 @@ function FromJulianDay(julian) {
function MakeDay(year, month, date) {
if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
// Conversion to integers.
year = TO_INTEGER(year);
month = TO_INTEGER(month);
date = TO_INTEGER(date);
// Overflow months into year.
year = year + FLOOR(month/12);
month = month % 12;
if (month < 0) {
month += 12;
if (year < kMinYear || year > kMaxYear ||
month < kMinMonth || month > kMaxMonth ||
date < kMinDate || date > kMaxDate) {
return $NaN;
}
// Return days relative to Jan 1 1970.
return ToJulianDay(year, month, date) - kDayZeroInJulianDay;
// Now we rely on year, month and date being SMIs.
return %DateMakeDay(year, month, date);
}

50
deps/v8/src/debug-delay.js

@ -1251,7 +1251,9 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
} else if (request.command == 'version') {
this.versionRequest_(request, response);
} else if (request.command == 'profile') {
this.profileRequest_(request, response);
this.profileRequest_(request, response);
} else if (request.command == 'changelive') {
this.changeLiveRequest_(request, response);
} else {
throw new Error('Unknown command "' + request.command + '" in request');
}
@ -1954,6 +1956,52 @@ DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
};
DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response) {
if (!Debug.LiveEditChangeScript) {
return response.failed('LiveEdit feature is not supported');
}
if (!request.arguments) {
return response.failed('Missing arguments');
}
var script_id = request.arguments.script_id;
var change_pos = parseInt(request.arguments.change_pos);
var change_len = parseInt(request.arguments.change_len);
var new_string = request.arguments.new_string;
if (!IS_STRING(new_string)) {
response.failed('Argument "new_string" is not a string value');
return;
}
var scripts = %DebugGetLoadedScripts();
var the_script = null;
for (var i = 0; i < scripts.length; i++) {
if (scripts[i].id == script_id) {
the_script = scripts[i];
}
}
if (!the_script) {
response.failed('Script not found');
return;
}
var change_log = new Array();
try {
Debug.LiveEditChangeScript(the_script, change_pos, change_len, new_string,
change_log);
} catch (e) {
if (e instanceof Debug.LiveEditChangeScript.Failure) {
// Let's treat it as a "success" so that body with change_log will be
// sent back. "change_log" will have "failure" field set.
change_log.push( { failure: true } );
} else {
throw e;
}
}
response.body = {change_log: change_log};
};
// Check whether the previously processed command caused the VM to become
// running.
DebugCommandProcessor.prototype.isRunning = function() {

54
deps/v8/src/debug.cc

@ -39,6 +39,7 @@
#include "global-handles.h"
#include "ic.h"
#include "ic-inl.h"
#include "messages.h"
#include "natives.h"
#include "stub-cache.h"
#include "log.h"
@ -123,7 +124,9 @@ void BreakLocationIterator::Next() {
if (RelocInfo::IsCodeTarget(rmode())) {
Address target = original_rinfo()->target_address();
Code* code = Code::GetCodeFromTargetAddress(target);
if (code->is_inline_cache_stub() || RelocInfo::IsConstructCall(rmode())) {
if ((code->is_inline_cache_stub() &&
code->kind() != Code::BINARY_OP_IC) ||
RelocInfo::IsConstructCall(rmode())) {
break_point_++;
return;
}
@ -755,6 +758,12 @@ bool Debug::Load() {
bool caught_exception =
!CompileDebuggerScript(Natives::GetIndex("mirror")) ||
!CompileDebuggerScript(Natives::GetIndex("debug"));
if (FLAG_enable_liveedit) {
caught_exception = caught_exception ||
!CompileDebuggerScript(Natives::GetIndex("liveedit"));
}
Debugger::set_compiling_natives(false);
// Make sure we mark the debugger as not loading before we might
@ -1337,24 +1346,26 @@ Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
// Find the builtin debug break function matching the calling convention
// used by the call site.
if (code->is_inline_cache_stub()) {
if (code->is_call_stub()) {
return ComputeCallDebugBreak(code->arguments_count());
}
if (code->is_load_stub()) {
return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
}
if (code->is_store_stub()) {
return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
}
if (code->is_keyed_load_stub()) {
Handle<Code> result =
Handle<Code>(Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
return result;
}
if (code->is_keyed_store_stub()) {
Handle<Code> result =
Handle<Code>(Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
return result;
switch (code->kind()) {
case Code::CALL_IC:
return ComputeCallDebugBreak(code->arguments_count());
case Code::LOAD_IC:
return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
case Code::STORE_IC:
return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
case Code::KEYED_LOAD_IC:
return Handle<Code>(
Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
case Code::KEYED_STORE_IC:
return Handle<Code>(
Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
default:
UNREACHABLE();
}
}
if (RelocInfo::IsConstructCall(mode)) {
@ -1959,7 +1970,8 @@ void Debugger::OnBeforeCompile(Handle<Script> script) {
// Handle debugger actions when a new script is compiled.
void Debugger::OnAfterCompile(Handle<Script> script, Handle<JSFunction> fun) {
void Debugger::OnAfterCompile(Handle<Script> script,
AfterCompileFlags after_compile_flags) {
HandleScope scope;
// Add the newly compiled script to the script cache.
@ -2006,7 +2018,7 @@ void Debugger::OnAfterCompile(Handle<Script> script, Handle<JSFunction> fun) {
return;
}
// Bail out based on state or if there is no listener for this event
if (in_debugger) return;
if (in_debugger && (after_compile_flags & SEND_WHEN_DEBUGGING) == 0) return;
if (!Debugger::EventActive(v8::AfterCompile)) return;
// Create the compile state object.

7
deps/v8/src/debug.h

@ -604,8 +604,13 @@ class Debugger {
static void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
static void OnException(Handle<Object> exception, bool uncaught);
static void OnBeforeCompile(Handle<Script> script);
enum AfterCompileFlags {
NO_AFTER_COMPILE_FLAGS,
SEND_WHEN_DEBUGGING
};
static void OnAfterCompile(Handle<Script> script,
Handle<JSFunction> fun);
AfterCompileFlags after_compile_flags);
static void OnNewFunction(Handle<JSFunction> fun);
static void OnScriptCollected(int id);
static void ProcessDebugEvent(v8::DebugEvent event,

3
deps/v8/src/factory.h

@ -30,11 +30,12 @@
#include "globals.h"
#include "heap.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {
// Forward declarations.
class ZoneScopeInfo;
// Interface for handle based allocation.

1
deps/v8/src/fast-codegen.h

@ -93,6 +93,7 @@ class FastCodeGenerator: public AstVisitor {
Register accumulator1();
Register scratch0();
Register scratch1();
Register scratch2();
Register receiver_reg();
Register context_reg();

9
deps/v8/src/flag-definitions.h

@ -153,6 +153,9 @@ DEFINE_bool(always_fast_compiler, false,
"try to use the speculative optimizing backend for all code")
DEFINE_bool(trace_bailout, false,
"print reasons for falling back to using the classic V8 backend")
DEFINE_bool(safe_int32_compiler, false,
"enable optimized side-effect-free int32 expressions.")
DEFINE_bool(use_flow_graph, false, "perform flow-graph based optimizations")
// compilation-cache.cc
DEFINE_bool(compilation_cache, true, "enable compilation cache")
@ -163,6 +166,7 @@ DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_bool(debugger_auto_break, true,
"automatically set the debug break flag when debugger commands are "
"in the queue")
DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
// frames.cc
DEFINE_int(max_stack_trace_source_length, 300,
@ -230,9 +234,6 @@ DEFINE_bool(trace_exception, false,
DEFINE_bool(preallocate_message_memory, false,
"preallocate some memory to build stack traces.")
// usage-analyzer.cc
DEFINE_bool(usage_computation, true, "compute variable usage counts")
// v8.cc
DEFINE_bool(preemption, false,
"activate a 100ms timer that switches between V8 threads")
@ -304,6 +305,8 @@ DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
DEFINE_bool(print_scopes, false, "print scopes")
DEFINE_bool(print_ir, false, "print the AST as seen by the backend")
DEFINE_bool(print_graph_text, false,
"print a text representation of the flow graph")
// contexts.cc
DEFINE_bool(trace_contexts, false, "trace contexts operations")

1
deps/v8/src/frame-element.cc

@ -28,6 +28,7 @@
#include "v8.h"
#include "frame-element.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {

33
deps/v8/src/frame-element.h

@ -30,6 +30,7 @@
#include "number-info.h"
#include "macro-assembler.h"
#include "zone.h"
namespace v8 {
namespace internal {
@ -53,23 +54,25 @@ class FrameElement BASE_EMBEDDED {
SYNCED
};
inline NumberInfo::Type number_info() {
inline NumberInfo number_info() {
// Copied elements do not have number info. Instead
// we have to inspect their backing element in the frame.
ASSERT(!is_copy());
if (!is_constant()) return NumberInfoField::decode(value_);
if (!is_constant()) {
return NumberInfo::FromInt(NumberInfoField::decode(value_));
}
Handle<Object> value = handle();
if (value->IsSmi()) return NumberInfo::kSmi;
if (value->IsHeapNumber()) return NumberInfo::kHeapNumber;
return NumberInfo::kUnknown;
if (value->IsSmi()) return NumberInfo::Smi();
if (value->IsHeapNumber()) return NumberInfo::HeapNumber();
return NumberInfo::Unknown();
}
inline void set_number_info(NumberInfo::Type info) {
inline void set_number_info(NumberInfo info) {
// Copied elements do not have number info. Instead
// we have to inspect their backing element in the frame.
ASSERT(!is_copy());
value_ = value_ & ~NumberInfoField::mask();
value_ = value_ | NumberInfoField::encode(info);
value_ = value_ | NumberInfoField::encode(info.ToInt());
}
// The default constructor creates an invalid frame element.
@ -77,7 +80,7 @@ class FrameElement BASE_EMBEDDED {
value_ = TypeField::encode(INVALID)
| CopiedField::encode(false)
| SyncedField::encode(false)
| NumberInfoField::encode(NumberInfo::kUninitialized)
| NumberInfoField::encode(NumberInfo::Uninitialized().ToInt())
| DataField::encode(0);
}
@ -88,7 +91,7 @@ class FrameElement BASE_EMBEDDED {
}
// Factory function to construct an in-memory frame element.
static FrameElement MemoryElement(NumberInfo::Type info) {
static FrameElement MemoryElement(NumberInfo info) {
FrameElement result(MEMORY, no_reg, SYNCED, info);
return result;
}
@ -96,7 +99,7 @@ class FrameElement BASE_EMBEDDED {
// Factory function to construct an in-register frame element.
static FrameElement RegisterElement(Register reg,
SyncFlag is_synced,
NumberInfo::Type info) {
NumberInfo info) {
return FrameElement(REGISTER, reg, is_synced, info);
}
@ -210,11 +213,11 @@ class FrameElement BASE_EMBEDDED {
FrameElement(Type type,
Register reg,
SyncFlag is_synced,
NumberInfo::Type info) {
NumberInfo info) {
value_ = TypeField::encode(type)
| CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED)
| NumberInfoField::encode(info)
| NumberInfoField::encode(info.ToInt())
| DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
}
@ -223,7 +226,7 @@ class FrameElement BASE_EMBEDDED {
value_ = TypeField::encode(CONSTANT)
| CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED)
| NumberInfoField::encode(NumberInfo::kUninitialized)
| NumberInfoField::encode(NumberInfo::Uninitialized().ToInt())
| DataField::encode(ConstantList()->length());
ConstantList()->Add(value);
}
@ -252,8 +255,8 @@ class FrameElement BASE_EMBEDDED {
class TypeField: public BitField<Type, 0, 3> {};
class CopiedField: public BitField<bool, 3, 1> {};
class SyncedField: public BitField<bool, 4, 1> {};
class NumberInfoField: public BitField<NumberInfo::Type, 5, 3> {};
class DataField: public BitField<uint32_t, 8, 32 - 8> {};
class NumberInfoField: public BitField<int, 5, 4> {};
class DataField: public BitField<uint32_t, 9, 32 - 9> {};
friend class VirtualFrame;
};

1
deps/v8/src/frames.cc

@ -32,7 +32,6 @@
#include "scopeinfo.h"
#include "string-stream.h"
#include "top.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {

2
deps/v8/src/globals.h

@ -261,6 +261,8 @@ template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
class Script;
class Slot;
class Smi;
template <typename Config, class Allocator = FreeStoreAllocationPolicy>
class SplayTree;
class Statement;
class String;
class Struct;

15
deps/v8/src/handles.cc

@ -203,7 +203,7 @@ void TransformToFastProperties(Handle<JSObject> object,
void FlattenString(Handle<String> string) {
CALL_HEAP_FUNCTION_VOID(string->TryFlattenIfNotFlat());
CALL_HEAP_FUNCTION_VOID(string->TryFlatten());
ASSERT(string->IsFlat());
}
@ -283,6 +283,12 @@ Handle<Object> GetProperty(Handle<Object> obj,
}
Handle<Object> GetElement(Handle<Object> obj,
uint32_t index) {
CALL_HEAP_FUNCTION(Runtime::GetElement(obj, index), Object);
}
Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<String> name,
@ -362,8 +368,11 @@ Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) {
}
Handle<String> SubString(Handle<String> str, int start, int end) {
CALL_HEAP_FUNCTION(str->SubString(start, end), String);
Handle<String> SubString(Handle<String> str,
int start,
int end,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(str->SubString(start, end, pretenure), String);
}

8
deps/v8/src/handles.h

@ -233,6 +233,9 @@ Handle<Object> GetProperty(Handle<JSObject> obj,
Handle<Object> GetProperty(Handle<Object> obj,
Handle<Object> key);
Handle<Object> GetElement(Handle<Object> obj,
uint32_t index);
Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<String> name,
@ -287,7 +290,10 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first,
Handle<FixedArray> second);
Handle<String> SubString(Handle<String> str, int start, int end);
Handle<String> SubString(Handle<String> str,
int start,
int end,
PretenureFlag pretenure = NOT_TENURED);
// Sets the expected number of properties for the function's instances.

12
deps/v8/src/heap-inl.h

@ -187,6 +187,18 @@ void Heap::RecordWrite(Address address, int offset) {
}
void Heap::RecordWrites(Address address, int start, int len) {
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
for (int offset = start;
offset < start + len * kPointerSize;
offset += kPointerSize) {
SLOW_ASSERT(Contains(address + offset));
Page::SetRSet(address, offset);
}
}
OldSpace* Heap::TargetSpace(HeapObject* object) {
InstanceType type = object->map()->instance_type();
AllocationSpace space = TargetSpaceId(type);

1
deps/v8/src/heap-profiler.cc

@ -31,6 +31,7 @@
#include "frames-inl.h"
#include "global-handles.h"
#include "string-stream.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {

2
deps/v8/src/heap-profiler.h

@ -28,6 +28,8 @@
#ifndef V8_HEAP_PROFILER_H_
#define V8_HEAP_PROFILER_H_
#include "zone.h"
namespace v8 {
namespace internal {

119
deps/v8/src/heap.cc

@ -46,6 +46,7 @@
#include "arm/regexp-macro-assembler-arm.h"
#endif
namespace v8 {
namespace internal {
@ -371,11 +372,6 @@ void Heap::CollectAllGarbage(bool force_compaction) {
}
void Heap::NotifyContextDisposed() {
contexts_disposed_++;
}
bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
// The VM is in the GC state until exiting this function.
VMState state(GC);
@ -545,12 +541,22 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
VerifySymbolTable();
if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
ASSERT(!allocation_allowed_);
GCTracer::ExternalScope scope(tracer);
global_gc_prologue_callback_();
}
EnsureFromSpaceIsCommitted();
// Perform mark-sweep with optional compaction.
if (collector == MARK_COMPACTOR) {
MarkCompact(tracer);
}
// Always perform a scavenge to make room in new space.
Scavenge();
// Update the old space promotion limits after the scavenge due to
// promotions during scavenge.
if (collector == MARK_COMPACTOR) {
int old_gen_size = PromotedSpaceSize();
old_gen_promotion_limit_ =
old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
@ -558,12 +564,12 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
old_gen_exhausted_ = false;
}
Scavenge();
Counters::objs_since_last_young.Set(0);
if (collector == MARK_COMPACTOR) {
DisableAssertNoAllocation allow_allocation;
GCTracer::ExternalScope scope(tracer);
GlobalHandles::PostGarbageCollectionProcessing();
}
@ -578,6 +584,7 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
ASSERT(!allocation_allowed_);
GCTracer::ExternalScope scope(tracer);
global_gc_epilogue_callback_();
}
VerifySymbolTable();
@ -1209,6 +1216,16 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
}
Object* Heap::AllocateCodeCache() {
Object* result = AllocateStruct(CODE_CACHE_TYPE);
if (result->IsFailure()) return result;
CodeCache* code_cache = CodeCache::cast(result);
code_cache->set_default_cache(empty_fixed_array());
code_cache->set_normal_type_cache(undefined_value());
return code_cache;
}
const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
{type, size, k##camel_name##MapRootIndex},
@ -1625,7 +1642,7 @@ bool Heap::CreateInitialObjects() {
if (InitializeNumberStringCache()->IsFailure()) return false;
// Allocate cache for single character strings.
obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
obj = AllocateFixedArray(String::kMaxAsciiCharCode+1, TENURED);
if (obj->IsFailure()) return false;
set_single_character_string_cache(FixedArray::cast(obj));
@ -1659,7 +1676,7 @@ Object* Heap::InitializeNumberStringCache() {
// max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
int number_string_cache_size = max_semispace_size_ / 512;
number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
Object* obj = AllocateFixedArray(number_string_cache_size * 2);
Object* obj = AllocateFixedArray(number_string_cache_size * 2, TENURED);
if (!obj->IsFailure()) set_number_string_cache(FixedArray::cast(obj));
return obj;
}
@ -1982,7 +1999,8 @@ Object* Heap::AllocateConsString(String* first, String* second) {
Object* Heap::AllocateSubString(String* buffer,
int start,
int end) {
int end,
PretenureFlag pretenure) {
int length = end - start;
if (length == 1) {
@ -1998,16 +2016,13 @@ Object* Heap::AllocateSubString(String* buffer,
}
// Make an attempt to flatten the buffer to reduce access time.
if (!buffer->IsFlat()) {
buffer->TryFlatten();
}
buffer->TryFlatten();
Object* result = buffer->IsAsciiRepresentation()
? AllocateRawAsciiString(length)
: AllocateRawTwoByteString(length);
? AllocateRawAsciiString(length, pretenure )
: AllocateRawTwoByteString(length, pretenure);
if (result->IsFailure()) return result;
String* string_result = String::cast(result);
// Copy the characters into the new object.
if (buffer->IsAsciiRepresentation()) {
ASSERT(string_result->IsAsciiRepresentation());
@ -2957,6 +2972,18 @@ Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
}
Object* Heap::AllocateUninitializedFixedArray(int length) {
if (length == 0) return empty_fixed_array();
Object* obj = AllocateRawFixedArray(length);
if (obj->IsFailure()) return obj;
reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
FixedArray::cast(obj)->set_length(length);
return obj;
}
Object* Heap::AllocateFixedArrayWithHoles(int length) {
if (length == 0) return empty_fixed_array();
Object* result = AllocateRawFixedArray(length);
@ -2966,18 +2993,17 @@ Object* Heap::AllocateFixedArrayWithHoles(int length) {
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
// Initialize body.
Object* value = the_hole_value();
for (int index = 0; index < length; index++) {
ASSERT(!Heap::InNewSpace(value)); // value = the hole
array->set(index, value, SKIP_WRITE_BARRIER);
}
ASSERT(!Heap::InNewSpace(the_hole_value()));
MemsetPointer(HeapObject::RawField(array, FixedArray::kHeaderSize),
the_hole_value(),
length);
}
return result;
}
Object* Heap::AllocateHashTable(int length) {
Object* result = Heap::AllocateFixedArray(length);
Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
Object* result = Heap::AllocateFixedArray(length, pretenure);
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(hash_table_map());
ASSERT(result->IsHashTable());
@ -3060,13 +3086,7 @@ bool Heap::IdleNotification() {
static int number_idle_notifications = 0;
static int last_gc_count = gc_count_;
if (!FLAG_expose_gc && (contexts_disposed_ > 0)) {
HistogramTimerScope scope(&Counters::gc_context);
CollectAllGarbage(false);
ASSERT(contexts_disposed_ == 0);
return false;
}
bool uncommit = true;
bool finished = false;
if (last_gc_count == gc_count_) {
@ -3077,7 +3097,12 @@ bool Heap::IdleNotification() {
}
if (number_idle_notifications == kIdlesBeforeScavenge) {
CollectGarbage(0, NEW_SPACE);
if (contexts_disposed_ > 0) {
HistogramTimerScope scope(&Counters::gc_context);
CollectAllGarbage(false);
} else {
CollectGarbage(0, NEW_SPACE);
}
new_space_.Shrink();
last_gc_count = gc_count_;
@ -3097,10 +3122,29 @@ bool Heap::IdleNotification() {
last_gc_count = gc_count_;
number_idle_notifications = 0;
finished = true;
} else if (contexts_disposed_ > 0) {
if (FLAG_expose_gc) {
contexts_disposed_ = 0;
} else {
HistogramTimerScope scope(&Counters::gc_context);
CollectAllGarbage(false);
last_gc_count = gc_count_;
}
// If this is the first idle notification, we reset the
// notification count to avoid letting idle notifications for
// context disposal garbage collections start a potentially too
// aggressive idle GC cycle.
if (number_idle_notifications <= 1) {
number_idle_notifications = 0;
uncommit = false;
}
}
// Uncommit unused memory in new space.
Heap::UncommitFromSpace();
// Make sure that we have no pending context disposals and
// conditionally uncommit from space.
ASSERT(contexts_disposed_ == 0);
if (uncommit) Heap::UncommitFromSpace();
return finished;
}
@ -4062,6 +4106,7 @@ void Heap::TracePathToGlobal() {
GCTracer::GCTracer()
: start_time_(0.0),
start_size_(0.0),
external_time_(0.0),
gc_count_(0),
full_gc_count_(0),
is_compacting_(false),
@ -4079,10 +4124,12 @@ GCTracer::GCTracer()
GCTracer::~GCTracer() {
if (!FLAG_trace_gc) return;
// Printf ONE line iff flag is set.
PrintF("%s %.1f -> %.1f MB, %d ms.\n",
CollectorString(),
start_size_, SizeOfHeapObjects(),
static_cast<int>(OS::TimeCurrentMillis() - start_time_));
int time = static_cast<int>(OS::TimeCurrentMillis() - start_time_);
int external_time = static_cast<int>(external_time_);
PrintF("%s %.1f -> %.1f MB, ",
CollectorString(), start_size_, SizeOfHeapObjects());
if (external_time > 0) PrintF("%d / ", external_time);
PrintF("%d ms.\n", time);
#if defined(ENABLE_LOGGING_AND_PROFILING)
Heap::PrintShortHeapStatistics();

63
deps/v8/src/heap.h

@ -30,12 +30,15 @@
#include <math.h>
#include "zone-inl.h"
#include "splay-tree-inl.h"
#include "v8-counters.h"
namespace v8 {
namespace internal {
// Forward declarations.
class ZoneScopeInfo;
// Defines all the roots in Heap.
#define UNCONDITIONAL_STRONG_ROOT_LIST(V) \
/* Put the byte array map early. We need it to be in place by the time */ \
@ -345,6 +348,9 @@ class Heap : public AllStatic {
// Allocate a map for the specified function
static Object* AllocateInitialMap(JSFunction* fun);
// Allocates an empty code cache.
static Object* AllocateCodeCache();
// Allocates and fully initializes a String. There are two String
// encodings: ASCII and two byte. One should choose between the three string
// allocation functions based on the encoding of the string buffer used to
@ -449,9 +455,16 @@ class Heap : public AllStatic {
// failed.
// Please note this does not perform a garbage collection.
static Object* AllocateFixedArray(int length, PretenureFlag pretenure);
// Allocate uninitialized, non-tenured fixed array with length elements.
// Allocates a fixed array initialized with undefined values
static Object* AllocateFixedArray(int length);
// Allocates an uninitialized fixed array. It must be filled by the caller.
//
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
static Object* AllocateUninitializedFixedArray(int length);
// Make a copy of src and return it. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
static Object* CopyFixedArray(FixedArray* src);
@ -464,7 +477,8 @@ class Heap : public AllStatic {
// AllocateHashTable is identical to AllocateFixedArray except
// that the resulting object has hash_table_map as map.
static Object* AllocateHashTable(int length);
static Object* AllocateHashTable(int length,
PretenureFlag pretenure = NOT_TENURED);
// Allocate a global (but otherwise uninitialized) context.
static Object* AllocateGlobalContext();
@ -556,7 +570,8 @@ class Heap : public AllStatic {
// Please note this does not perform a garbage collection.
static Object* AllocateSubString(String* buffer,
int start,
int end);
int end,
PretenureFlag pretenure = NOT_TENURED);
// Allocate a new external string object, which is backed by a string
// resource that resides outside the V8 heap.
@ -633,7 +648,7 @@ class Heap : public AllStatic {
static void CollectAllGarbage(bool force_compaction);
// Notify the heap that a context has been disposed.
static void NotifyContextDisposed();
static int NotifyContextDisposed() { return ++contexts_disposed_; }
// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
@ -765,6 +780,9 @@ class Heap : public AllStatic {
// Write barrier support for address[offset] = o.
static inline void RecordWrite(Address address, int offset);
// Write barrier support for address[start : start + len[ = o.
static inline void RecordWrites(Address address, int start, int len);
// Given an address occupied by a live code object, return that object.
static Object* FindCodeObject(Address a);
@ -1518,8 +1536,23 @@ class DisableAssertNoAllocation {
class GCTracer BASE_EMBEDDED {
public:
GCTracer();
// Time spent while in the external scope counts towards the
// external time in the tracer and will be reported separately.
class ExternalScope BASE_EMBEDDED {
public:
explicit ExternalScope(GCTracer* tracer) : tracer_(tracer) {
start_time_ = OS::TimeCurrentMillis();
}
~ExternalScope() {
tracer_->external_time_ += OS::TimeCurrentMillis() - start_time_;
}
private:
GCTracer* tracer_;
double start_time_;
};
GCTracer();
~GCTracer();
// Sets the collector.
@ -1553,6 +1586,9 @@ class GCTracer BASE_EMBEDDED {
double start_size_; // Size of objects in heap set in constructor.
GarbageCollector collector_; // Type of collector.
// Keep track of the amount of time spent in external callbacks.
double external_time_;
// A count (including this one, eg, the first collection is 1) of the
// number of garbage collections.
int gc_count_;
@ -1608,6 +1644,7 @@ class TranscendentalCache {
if (e.in[0] == c.integers[0] &&
e.in[1] == c.integers[1]) {
ASSERT(e.output != NULL);
Counters::transcendental_cache_hit.Increment();
return e.output;
}
double answer = Calculate(input);
@ -1617,6 +1654,7 @@ class TranscendentalCache {
elements_[hash].in[1] = c.integers[1];
elements_[hash].output = heap_number;
}
Counters::transcendental_cache_miss.Increment();
return heap_number;
}
@ -1657,6 +1695,17 @@ class TranscendentalCache {
hash ^= hash >> 8;
return (hash & (kCacheSize - 1));
}
static Address cache_array_address() {
// Used to create an external reference.
return reinterpret_cast<Address>(caches_);
}
// Allow access to the caches_ array as an ExternalReference.
friend class ExternalReference;
// Inline implementation of the caching.
friend class TranscendentalCacheStub;
static TranscendentalCache* caches_[kNumberOfCaches];
Element elements_[kCacheSize];
Type type_;

114
deps/v8/src/ia32/assembler-ia32.cc

@ -753,6 +753,13 @@ void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
}
void Assembler::cld() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xFC);
}
void Assembler::rep_movs() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -761,6 +768,14 @@ void Assembler::rep_movs() {
}
void Assembler::rep_stos() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
EMIT(0xAB);
}
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1637,6 +1652,13 @@ void Assembler::fld(int i) {
}
void Assembler::fstp(int i) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_farith(0xDD, 0xD8, i);
}
void Assembler::fld1() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1645,6 +1667,14 @@ void Assembler::fld1() {
}
void Assembler::fldpi() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xEB);
}
void Assembler::fldz() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1685,6 +1715,14 @@ void Assembler::fstp_d(const Operand& adr) {
}
void Assembler::fst_d(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xDD);
emit_operand(edx, adr);
}
void Assembler::fild_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -2012,6 +2050,17 @@ void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
}
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
EMIT(0x0F);
EMIT(0x5A);
emit_sse_operand(dst, src);
}
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@ -2067,6 +2116,16 @@ void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
}
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
EMIT(0x0F);
EMIT(0x51);
emit_sse_operand(dst, src);
}
void Assembler::comisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@ -2078,6 +2137,17 @@ void Assembler::comisd(XMMRegister dst, XMMRegister src) {
}
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x2E);
emit_sse_operand(dst, src);
}
void Assembler::movdqa(const Operand& dst, XMMRegister src ) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@ -2157,6 +2227,50 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
EMIT(0x0F);
EMIT(0x10);
emit_sse_operand(dst, src);
}
void Assembler::movd(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x6E);
emit_sse_operand(dst, src);
}
void Assembler::pxor(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0xEF);
emit_sse_operand(dst, src);
}
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x38);
EMIT(0x17);
emit_sse_operand(dst, src);
}
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };

19
deps/v8/src/ia32/assembler-ia32.h

@ -93,7 +93,7 @@ const Register no_reg = { -1 };
struct XMMRegister {
bool is_valid() const { return 0 <= code_ && code_ < 2; } // currently
bool is_valid() const { return 0 <= code_ && code_ < 8; }
int code() const {
ASSERT(is_valid());
return code_;
@ -542,8 +542,12 @@ class Assembler : public Malloced {
void cmov(Condition cc, Register dst, Handle<Object> handle);
void cmov(Condition cc, Register dst, const Operand& src);
// Flag management.
void cld();
// Repetitive string instructions.
void rep_movs();
void rep_stos();
// Exchange two registers
void xchg(Register dst, Register src);
@ -668,6 +672,7 @@ class Assembler : public Malloced {
void call(Label* L);
void call(byte* entry, RelocInfo::Mode rmode);
void call(const Operand& adr);
void call(const ExternalReference& target);
void call(Handle<Code> code, RelocInfo::Mode rmode);
// Jumps
@ -683,15 +688,18 @@ class Assembler : public Malloced {
// Floating-point operations
void fld(int i);
void fstp(int i);
void fld1();
void fldz();
void fldpi();
void fld_s(const Operand& adr);
void fld_d(const Operand& adr);
void fstp_s(const Operand& adr);
void fstp_d(const Operand& adr);
void fst_d(const Operand& adr);
void fild_s(const Operand& adr);
void fild_d(const Operand& adr);
@ -750,14 +758,17 @@ class Assembler : public Malloced {
void cvttsd2si(Register dst, const Operand& src);
void cvtsi2sd(XMMRegister dst, const Operand& src);
void cvtss2sd(XMMRegister dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src);
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
void comisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
@ -768,6 +779,12 @@ class Assembler : public Malloced {
void movdbl(XMMRegister dst, const Operand& src);
void movdbl(const Operand& dst, XMMRegister src);
void movd(XMMRegister dst, const Operand& src);
void movsd(XMMRegister dst, XMMRegister src);
void pxor(XMMRegister dst, XMMRegister src);
void ptest(XMMRegister dst, XMMRegister src);
// Debugging
void Print();

95
deps/v8/src/ia32/builtins-ia32.cc

@ -63,10 +63,10 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
}
// JumpToRuntime expects eax to contain the number of arguments
// JumpToExternalReference expects eax to contain the number of arguments
// including the receiver and the extra arguments.
__ add(Operand(eax), Immediate(num_extra_args + 1));
__ JumpToRuntime(ExternalReference(id));
__ JumpToExternalReference(ExternalReference(id));
}
@ -797,38 +797,23 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// register elements_array is scratched.
static void AllocateJSArray(MacroAssembler* masm,
Register array_function, // Array function.
Register array_size, // As a smi.
Register array_size, // As a smi, cannot be 0.
Register result,
Register elements_array,
Register elements_array_end,
Register scratch,
bool fill_with_hole,
Label* gc_required) {
Label not_empty, allocated;
ASSERT(scratch.is(edi)); // rep stos destination
ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
// Load the initial map from the array function.
__ mov(elements_array,
FieldOperand(array_function,
JSFunction::kPrototypeOrInitialMapOffset));
// Check whether an empty sized array is requested.
__ test(array_size, Operand(array_size));
__ j(not_zero, &not_empty);
// If an empty array is requested allocate a small elements array anyway. This
// keeps the code below free of special casing for the empty array.
int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
__ AllocateInNewSpace(size,
result,
elements_array_end,
scratch,
gc_required,
TAG_OBJECT);
__ jmp(&allocated);
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
__ bind(&not_empty);
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
times_half_pointer_size, // array_size is a smi.
@ -845,7 +830,6 @@ static void AllocateJSArray(MacroAssembler* masm,
// elements_array: initial map
// elements_array_end: start of next object
// array_size: size of array (smi)
__ bind(&allocated);
__ mov(FieldOperand(result, JSObject::kMapOffset), elements_array);
__ mov(elements_array, Factory::empty_fixed_array());
__ mov(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
@ -869,15 +853,6 @@ static void AllocateJSArray(MacroAssembler* masm,
__ SmiUntag(array_size); // Convert from smi to value.
__ mov(FieldOperand(elements_array, JSObject::kMapOffset),
Factory::fixed_array_map());
Label not_empty_2, fill_array;
__ test(array_size, Operand(array_size));
__ j(not_zero, &not_empty_2);
// Length of the FixedArray is the number of pre-allocated elements even
// though the actual JSArray has length 0.
__ mov(FieldOperand(elements_array, Array::kLengthOffset),
Immediate(kPreallocatedArrayElements));
__ jmp(&fill_array);
__ bind(&not_empty_2);
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
__ mov(FieldOperand(elements_array, Array::kLengthOffset), array_size);
@ -885,20 +860,18 @@ static void AllocateJSArray(MacroAssembler* masm,
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
__ bind(&fill_array);
if (fill_with_hole) {
Label loop, entry;
__ mov(scratch, Factory::the_hole_value());
__ lea(elements_array, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ jmp(&entry);
__ bind(&loop);
__ mov(Operand(elements_array, 0), scratch);
__ add(Operand(elements_array), Immediate(kPointerSize));
__ bind(&entry);
__ cmp(elements_array, Operand(elements_array_end));
__ j(below, &loop);
__ lea(edi, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ push(eax);
__ mov(eax, Factory::the_hole_value());
__ cld();
__ rep_stos();
// Restore saved registers.
__ pop(eax);
}
}
@ -920,7 +893,8 @@ static void AllocateJSArray(MacroAssembler* masm,
static void ArrayNativeCode(MacroAssembler* masm,
bool construct_call,
Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call;
Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
empty_array, not_empty_array;
// Push the constructor and argc. No need to tag argc as a smi, as there will
// be no garbage collection with this on the stack.
@ -936,6 +910,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ test(eax, Operand(eax));
__ j(not_zero, &argc_one_or_more);
__ bind(&empty_array);
// Handle construction of an empty array.
AllocateEmptyJSArray(masm,
edi,
@ -958,30 +933,46 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ cmp(eax, 1);
__ j(not_equal, &argc_two_or_more);
ASSERT(kSmiTag == 0);
__ test(Operand(esp, (push_count + 1) * kPointerSize),
Immediate(kIntptrSignBit | kSmiTagMask));
__ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
__ test(ecx, Operand(ecx));
__ j(not_zero, &not_empty_array);
// The single argument passed is zero, so we jump to the code above used to
// handle the case of no arguments passed. To adapt the stack for that we move
// the return address and the pushed constructor (if pushed) one stack slot up
// thereby removing the passed argument. Argc is also on the stack - at the
// bottom - and it needs to be changed from 1 to 0 to have the call into the
// runtime system work in case a GC is required.
for (int i = push_count; i > 0; i--) {
__ mov(eax, Operand(esp, i * kPointerSize));
__ mov(Operand(esp, (i + 1) * kPointerSize), eax);
}
__ add(Operand(esp), Immediate(2 * kPointerSize)); // Drop two stack slots.
__ push(Immediate(0)); // Treat this as a call with argc of zero.
__ jmp(&empty_array);
__ bind(&not_empty_array);
__ test(ecx, Immediate(kIntptrSignBit | kSmiTagMask));
__ j(not_zero, &prepare_generic_code_call);
// Handle construction of an empty array of a certain size. Get the size from
// the stack and bail out if size is to large to actually allocate an elements
// array.
__ mov(edx, Operand(esp, (push_count + 1) * kPointerSize));
ASSERT(kSmiTag == 0);
__ cmp(edx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
__ cmp(ecx, JSObject::kInitialMaxFastElementArray << kSmiTagSize);
__ j(greater_equal, &prepare_generic_code_call);
// edx: array_size (smi)
// edi: constructor
// esp[0]: argc
// esp[0]: argc (cannot be 0 here)
// esp[4]: constructor (only if construct_call)
// esp[8]: return address
// esp[C]: argument
AllocateJSArray(masm,
edi,
edx,
ecx,
eax,
ebx,
ecx,
edx,
edi,
true,
&prepare_generic_code_call);

1750
deps/v8/src/ia32/codegen-ia32.cc

File diff suppressed because it is too large

97
deps/v8/src/ia32/codegen-ia32.h

@ -28,6 +28,8 @@
#ifndef V8_IA32_CODEGEN_IA32_H_
#define V8_IA32_CODEGEN_IA32_H_
#include "ic-inl.h"
namespace v8 {
namespace internal {
@ -494,8 +496,8 @@ class CodeGenerator: public AstVisitor {
// To prevent long attacker-controlled byte sequences, integer constants
// from the JavaScript source are loaded in two parts if they are larger
// than 16 bits.
static const int kMaxSmiInlinedBits = 16;
// than 17 bits.
static const int kMaxSmiInlinedBits = 17;
bool IsUnsafeSmi(Handle<Object> value);
// Load an integer constant x into a register target or into the stack using
// at most 16 bits of user-controlled data per assembly operation.
@ -563,6 +565,9 @@ class CodeGenerator: public AstVisitor {
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
void GenerateCharFromCode(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@ -588,6 +593,16 @@ class CodeGenerator: public AstVisitor {
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
// Fast support for Math.pow().
void GenerateMathPow(ZoneList<Expression*>* args);
// Fast call to transcendental functions.
void GenerateMathSin(ZoneList<Expression*>* args);
void GenerateMathCos(ZoneList<Expression*>* args);
// Fast case for sqrt
void GenerateMathSqrt(ZoneList<Expression*>* args);
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@ -655,6 +670,22 @@ class CodeGenerator: public AstVisitor {
};
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
public:
explicit TranscendentalCacheStub(TranscendentalCache::Type type)
: type_(type) {}
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_; }
Runtime::FunctionId RuntimeFunction();
void GenerateOperation(MacroAssembler* masm);
};
// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
@ -667,18 +698,35 @@ class GenericBinaryOpStub: public CodeStub {
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags,
NumberInfo::Type operands_type = NumberInfo::kUnknown)
NumberInfo operands_type)
: op_(op),
mode_(mode),
flags_(flags),
args_in_registers_(false),
args_reversed_(false),
name_(NULL),
operands_type_(operands_type) {
static_operands_type_(operands_type),
runtime_operands_type_(BinaryOpIC::DEFAULT),
name_(NULL) {
if (static_operands_type_.IsSmi()) {
mode_ = NO_OVERWRITE;
}
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
flags_(FlagBits::decode(key)),
args_in_registers_(ArgsInRegistersBits::decode(key)),
args_reversed_(ArgsReversedBits::decode(key)),
use_sse3_(SSE3Bits::decode(key)),
static_operands_type_(NumberInfo::ExpandedRepresentation(
StaticTypeInfoBits::decode(key))),
runtime_operands_type_(runtime_operands_type),
name_(NULL) {
}
// Generate code to call the stub with the supplied arguments. This will add
// code at the call site to prepare arguments either in registers or on the
// stack together with the actual call.
@ -698,8 +746,14 @@ class GenericBinaryOpStub: public CodeStub {
bool args_in_registers_; // Arguments passed in registers not on the stack.
bool args_reversed_; // Left and right argument are swapped.
bool use_sse3_;
// Number type information of operands, determined by code generator.
NumberInfo static_operands_type_;
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo runtime_operands_type_;
char* name_;
NumberInfo::Type operands_type_; // Number type information of operands.
const char* GetName();
@ -713,29 +767,32 @@ class GenericBinaryOpStub: public CodeStub {
static_cast<int>(flags_),
static_cast<int>(args_in_registers_),
static_cast<int>(args_reversed_),
NumberInfo::ToString(operands_type_));
static_operands_type_.ToString());
}
#endif
// Minor key encoding in 16 bits NNNFRASOOOOOOOMM.
// Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class SSE3Bits: public BitField<bool, 9, 1> {};
class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
class ArgsReversedBits: public BitField<bool, 11, 1> {};
class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
class NumberInfoBits: public BitField<NumberInfo::Type, 13, 3> {};
class StaticTypeInfoBits: public BitField<int, 13, 3> {};
class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
// Encode the parameters in a unique 18 bit value.
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| FlagBits::encode(flags_)
| SSE3Bits::encode(use_sse3_)
| ArgsInRegistersBits::encode(args_in_registers_)
| ArgsReversedBits::encode(args_reversed_)
| NumberInfoBits::encode(operands_type_);
| StaticTypeInfoBits::encode(
static_operands_type_.ThreeBitRepresentation())
| RuntimeTypeInfoBits::encode(runtime_operands_type_);
}
void Generate(MacroAssembler* masm);
@ -743,6 +800,8 @@ class GenericBinaryOpStub: public CodeStub {
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
bool ArgsInRegistersSupported() {
return op_ == Token::ADD || op_ == Token::SUB
@ -757,6 +816,22 @@ class GenericBinaryOpStub: public CodeStub {
bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
bool HasArgsInRegisters() { return args_in_registers_; }
bool HasArgsReversed() { return args_reversed_; }
bool ShouldGenerateSmiCode() {
return HasSmiCodeInStub() &&
runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
runtime_operands_type_ != BinaryOpIC::STRINGS;
}
bool ShouldGenerateFPCode() {
return runtime_operands_type_ != BinaryOpIC::STRINGS;
}
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(runtime_operands_type_);
}
};

9
deps/v8/src/ia32/debug-ia32.cc

@ -146,9 +146,10 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC load call (from ic-ia32.cc).
// ----------- S t a t e -------------
// No registers used on entry.
// -- edx : receiver
// -- eax : key
// -----------------------------------
Generate_DebugBreakCallHelper(masm, 0, false);
Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), false);
}
@ -156,10 +157,12 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Register state for keyed IC load call (from ic-ia32.cc).
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -----------------------------------
// Register eax contains an object that needs to be pushed on the
// expression stack of the fake JS frame.
Generate_DebugBreakCallHelper(masm, eax.bit(), false);
Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false);
}

44
deps/v8/src/ia32/disasm-ia32.cc

@ -89,6 +89,7 @@ static ByteMnemonic zero_operands_instr[] = {
{0x9E, "sahf", UNSET_OP_ORDER},
{0x99, "cdq", UNSET_OP_ORDER},
{0x9B, "fwait", UNSET_OP_ORDER},
{0xFC, "cld", UNSET_OP_ORDER},
{-1, "", UNSET_OP_ORDER}
};
@ -679,6 +680,7 @@ int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
case 0xDD: switch (regop) {
case 0: mnem = "fld_d"; break;
case 2: mnem = "fstp"; break;
case 3: mnem = "fstp_d"; break;
default: UnimplementedInstruction();
}
@ -720,6 +722,7 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
case 0xE1: mnem = "fabs"; break;
case 0xE4: mnem = "ftst"; break;
case 0xE8: mnem = "fld1"; break;
case 0xEB: mnem = "fldpi"; break;
case 0xEE: mnem = "fldz"; break;
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
@ -1053,7 +1056,20 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer(",%s", NameOfCPURegister(regop));
} else if (*data == 0x0F) {
data++;
if (*data == 0x2F) {
if (*data == 0x38) {
data++;
if (*data == 0x17) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("ptest %s,%s",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else {
UnimplementedInstruction();
}
} else if (*data == 0x2F) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
@ -1069,6 +1085,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x6E) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movd %s,", NameOfXMMRegister(regop));
data += PrintRightOperand(data);
} else if (*data == 0x6F) {
data++;
int mod, regop, rm;
@ -1082,6 +1104,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (*data == 0xEF) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pxor %s,%s",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else {
UnimplementedInstruction();
}
@ -1168,6 +1198,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
const char* mnem = "?";
switch (b2) {
case 0x2A: mnem = "cvtsi2sd"; break;
case 0x51: mnem = "sqrtsd"; break;
case 0x58: mnem = "addsd"; break;
case 0x59: mnem = "mulsd"; break;
case 0x5C: mnem = "subsd"; break;
@ -1197,6 +1228,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (*(data+2) == 0x2C) {
data += 3;
data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
} else if (*(data+2) == 0x5A) {
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("cvtss2sd %s,%s",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*(data+2) == 0x6F) {
data += 3;
int mod, regop, rm;
@ -1216,6 +1255,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else if (*(data+1) == 0xA5) {
data += 2;
AppendToBuffer("rep_movs");
} else if (*(data+1) == 0xAB) {
data += 2;
AppendToBuffer("rep_stos");
} else {
UnimplementedInstruction();
}

1
deps/v8/src/ia32/fast-codegen-ia32.cc

@ -621,6 +621,7 @@ void FastCodeGenerator::EmitBitOr() {
void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
ASSERT(info_ == NULL);
info_ = compilation_info;
Comment cmnt(masm_, "[ function compiled by fast code generator");
// Save the caller's frame pointer and set up our own.
Comment prologue_cmnt(masm(), ";; Prologue");

37
deps/v8/src/ia32/full-codegen-ia32.cc

@ -56,6 +56,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
ASSERT(info_ == NULL);
info_ = info;
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
if (mode == PRIMARY) {
__ push(ebp); // Caller's frame pointer.
@ -741,23 +742,22 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
VisitForValue(prop->obj(), kStack);
VisitForValue(prop->key(), kStack);
if (decl->fun() != NULL) {
VisitForValue(prop->key(), kStack);
VisitForValue(decl->fun(), kAccumulator);
__ pop(ecx);
} else {
VisitForValue(prop->key(), kAccumulator);
__ mov(ecx, result_register());
__ mov(result_register(), Factory::the_hole_value());
}
__ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// Absence of a test eax instruction following the call
// indicates that none of the load was inlined.
__ nop();
// Value in eax is ignored (declarations are statements). Receiver
// and key on stack are discarded.
__ Drop(2);
}
}
}
@ -1130,7 +1130,8 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
__ push(result_register());
GenericBinaryOpStub stub(op,
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
NO_GENERIC_BINARY_FLAGS,
NumberInfo::Unknown());
__ CallStub(&stub);
Apply(context, eax);
}
@ -1251,6 +1252,12 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(result_register());
}
__ pop(ecx);
if (expr->ends_initialization_block()) {
__ mov(edx, Operand(esp, 0)); // Leave receiver on the stack for later.
} else {
__ pop(edx);
}
// Record source code position before IC call.
SetSourcePosition(expr->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
@ -1261,15 +1268,14 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
__ pop(edx);
__ push(eax); // Result of assignment, saved even if not needed.
// Receiver is under the key and value.
__ push(Operand(esp, 2 * kPointerSize));
__ push(edx);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(eax);
}
// Receiver and key are still on stack.
DropAndApply(2, context_, eax);
Apply(context_, eax);
}
@ -1739,7 +1745,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call stub for +1/-1.
GenericBinaryOpStub stub(expr->binary_op(),
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
NO_GENERIC_BINARY_FLAGS,
NumberInfo::Unknown());
stub.GenerateCall(masm(), eax, Smi::FromInt(1));
__ bind(&done);
@ -1777,18 +1784,20 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
__ pop(ecx);
__ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// This nop signals to the IC that there is no inlined code at the call
// site for it to patch.
__ nop();
if (expr->is_postfix()) {
__ Drop(2); // Result is on the stack under the key and the receiver.
// Result is on the stack
if (context_ != Expression::kEffect) {
ApplyTOS(context_);
}
} else {
DropAndApply(2, context_, eax);
Apply(context_, eax);
}
break;
}

279
deps/v8/src/ia32/ic-ia32.cc

@ -610,8 +610,9 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ push(ecx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(ExternalReference(
IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
ExternalReference ref = ExternalReference(
IC_Utility(kKeyedLoadPropertyWithInterceptor));
__ TailCallExternalReference(ref, 2, 1);
__ bind(&slow);
GenerateMiss(masm);
@ -621,54 +622,41 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -- esp[4] : key
// -- esp[8] : receiver
// -----------------------------------
Label slow, fast, array, extra, check_pixel_array;
// Get the receiver from the stack.
__ mov(edx, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, key
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
// Get the map from the receiver.
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
__ movzx_b(ebx, FieldOperand(edi, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow, not_taken);
// Get the key from the stack.
__ mov(ebx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
// Check that the key is a smi.
__ test(ebx, Immediate(kSmiTagMask));
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow, not_taken);
// Get the instance type from the map of the receiver.
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// Check if the object is a JS array or not.
__ cmp(ecx, JS_ARRAY_TYPE);
__ CmpInstanceType(edi, JS_ARRAY_TYPE);
__ j(equal, &array);
// Check that the object is some kind of JS object.
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(less, &slow, not_taken);
__ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
__ j(below, &slow, not_taken);
// Object case: Check key against length in the elements array.
// eax: value
// edx: JSObject
// ebx: index (as a smi)
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// ecx: key (a smi)
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
__ j(not_equal, &check_pixel_array, not_taken);
// Untag the key (for checking against untagged length in the fixed array).
__ mov(edx, Operand(ebx));
__ sar(edx, kSmiTagSize); // untag the index and use it for the comparison
__ cmp(edx, FieldOperand(ecx, Array::kLengthOffset));
// eax: value
// ecx: FixedArray
// ebx: index (as a smi)
__ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
__ mov(ebx, Operand(ecx));
__ SmiUntag(ebx);
__ cmp(ebx, FieldOperand(edi, Array::kLengthOffset));
__ j(below, &fast, taken);
// Slow case: call runtime.
@ -676,52 +664,51 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
GenerateRuntimeSetProperty(masm);
// Check whether the elements is a pixel array.
// eax: value
// ecx: elements array
// ebx: index (as a smi)
__ bind(&check_pixel_array);
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
Immediate(Factory::pixel_array_map()));
__ j(not_equal, &slow);
// eax: value
// ecx: key
// edx: receiver
// edi: elements array
__ CheckMap(edi, Factory::pixel_array_map(), &slow, true);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
__ sar(ebx, kSmiTagSize); // Untag the index.
__ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
__ mov(ebx, ecx);
__ SmiUntag(ebx);
__ cmp(ebx, FieldOperand(edi, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ mov(edx, eax); // Save the value.
__ sar(eax, kSmiTagSize); // Untag the value.
__ mov(ecx, eax); // Save the value. Key is not longer needed.
__ SmiUntag(ecx);
{ // Clamp the value to [0..255].
Label done;
__ test(eax, Immediate(0xFFFFFF00));
__ test(ecx, Immediate(0xFFFFFF00));
__ j(zero, &done);
__ setcc(negative, eax); // 1 if negative, 0 if positive.
__ dec_b(eax); // 0 if negative, 255 if positive.
__ setcc(negative, ecx); // 1 if negative, 0 if positive.
__ dec_b(ecx); // 0 if negative, 255 if positive.
__ bind(&done);
}
__ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
__ mov_b(Operand(ecx, ebx, times_1, 0), eax);
__ mov(eax, edx); // Return the original value.
__ ret(0);
__ mov(edi, FieldOperand(edi, PixelArray::kExternalPointerOffset));
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
__ ret(0); // Return value in eax.
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
__ bind(&extra);
// eax: value
// edx: JSArray
// ecx: FixedArray
// ebx: index (as a smi)
// flags: compare (ebx, edx.length())
// edx: receiver, a JSArray
// ecx: key, a smi.
// edi: receiver->elements, a FixedArray
// flags: compare (ecx, edx.length())
__ j(not_equal, &slow, not_taken); // do not leave holes in the array
__ sar(ebx, kSmiTagSize); // untag
__ cmp(ebx, FieldOperand(ecx, Array::kLengthOffset));
__ mov(ebx, ecx);
__ SmiUntag(ebx); // untag
__ cmp(ebx, FieldOperand(edi, Array::kLengthOffset));
__ j(above_equal, &slow, not_taken);
// Restore tag and increment.
__ lea(ebx, Operand(ebx, times_2, 1 << kSmiTagSize));
__ mov(FieldOperand(edx, JSArray::kLengthOffset), ebx);
__ sub(Operand(ebx), Immediate(1 << kSmiTagSize)); // decrement ebx again
// Add 1 to receiver->length, and go to fast array write.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(1 << kSmiTagSize));
__ jmp(&fast);
// Array case: Get the length and the elements array from the JS
@ -729,28 +716,26 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// length is always a smi.
__ bind(&array);
// eax: value
// edx: JSArray
// ebx: index (as a smi)
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
__ j(not_equal, &check_pixel_array);
// edx: receiver, a JSArray
// ecx: key, a smi.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
__ cmp(ebx, FieldOperand(edx, JSArray::kLengthOffset));
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
__ j(above_equal, &extra, not_taken);
// Fast case: Do the store.
__ bind(&fast);
// eax: value
// ecx: FixedArray
// ebx: index (as a smi)
__ mov(Operand(ecx, ebx, times_2, FixedArray::kHeaderSize - kHeapObjectTag),
eax);
// ecx: key (a smi)
// edx: receiver
// edi: FixedArray receiver->elements
__ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
// Update write barrier for the elements array address.
__ mov(edx, Operand(eax));
__ RecordWrite(ecx, 0, edx, ebx);
__ RecordWrite(edi, 0, edx, ecx);
__ ret(0);
}
@ -759,92 +744,91 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -- esp[4] : key
// -- esp[8] : receiver
// -----------------------------------
Label slow, check_heap_number;
// Get the receiver from the stack.
__ mov(edx, Operand(esp, 2 * kPointerSize));
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow);
// Get the map from the receiver.
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
__ movzx_b(ebx, FieldOperand(edi, Map::kBitFieldOffset));
__ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &slow);
// Get the key from the stack.
__ mov(ebx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
// Check that the key is a smi.
__ test(ebx, Immediate(kSmiTagMask));
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
// Get the instance type from the map of the receiver.
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// Check that the object is a JS object.
__ cmp(ecx, JS_OBJECT_TYPE);
__ CmpInstanceType(edi, JS_OBJECT_TYPE);
__ j(not_equal, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// eax: value
// edx: JSObject
// ebx: index (as a smi)
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
Handle<Map> map(Heap::MapForExternalArrayType(array_type));
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
Immediate(map));
__ j(not_equal, &slow);
// edx: receiver, a JSObject
// ecx: key, a smi
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ CheckMap(edi, Handle<Map>(Heap::MapForExternalArrayType(array_type)),
&slow, true);
// Check that the index is in range.
__ sar(ebx, kSmiTagSize); // Untag the index.
__ cmp(ebx, FieldOperand(ecx, ExternalArray::kLengthOffset));
__ mov(ebx, ecx);
__ SmiUntag(ebx);
__ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset));
// Unsigned comparison catches both negative and too-large values.
__ j(above_equal, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// eax: value
// ecx: elements array
// edx: receiver
// ecx: key
// edi: elements array
// ebx: untagged index
__ test(eax, Immediate(kSmiTagMask));
__ j(not_equal, &check_heap_number);
// smi case
__ mov(edx, eax); // Save the value.
__ sar(eax, kSmiTagSize); // Untag the value.
__ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
__ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed.
__ SmiUntag(ecx);
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ecx: base pointer of external storage
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(ecx, ebx, times_1, 0), eax);
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ mov_w(Operand(ecx, ebx, times_2, 0), eax);
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(Operand(ecx, ebx, times_4, 0), eax);
__ mov(Operand(edi, ebx, times_4, 0), ecx);
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
__ push(eax);
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(eax);
__ fstp_s(Operand(ecx, ebx, times_4, 0));
__ pop(ecx);
__ fstp_s(Operand(edi, ebx, times_4, 0));
break;
default:
UNREACHABLE();
break;
}
__ mov(eax, edx); // Return the original value.
__ ret(0);
__ ret(0); // Return the original value.
__ bind(&check_heap_number);
// eax: value
// edx: receiver
// ecx: key
// edi: elements array
// ebx: untagged index
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
__ j(not_equal, &slow);
@ -853,14 +837,12 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ mov(edx, eax); // Save the value.
__ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
__ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
// ebx: untagged index
// ecx: base pointer of external storage
// edi: base pointer of external storage
// top of FPU stack: value
if (array_type == kExternalFloatArray) {
__ fstp_s(Operand(ecx, ebx, times_4, 0));
__ mov(eax, edx); // Return the original value.
__ fstp_s(Operand(edi, ebx, times_4, 0));
__ ret(0);
} else {
// Need to perform float-to-int conversion.
@ -870,29 +852,27 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
__ j(parity_even, &is_nan);
if (array_type != kExternalUnsignedIntArray) {
__ push(eax); // Make room on stack
__ push(ecx); // Make room on stack
__ fistp_s(Operand(esp, 0));
__ pop(eax);
__ pop(ecx);
} else {
// fistp stores values as signed integers.
// To represent the entire range, we need to store as a 64-bit
// int and discard the high 32 bits.
__ push(eax); // Make room on stack
__ push(eax); // Make room on stack
__ sub(Operand(esp), Immediate(2 * kPointerSize));
__ fistp_d(Operand(esp, 0));
__ pop(eax);
__ mov(Operand(esp, 0), eax);
__ pop(eax);
__ pop(ecx);
__ add(Operand(esp), Immediate(kPointerSize));
}
// eax: untagged integer value
// ecx: untagged integer value
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(ecx, ebx, times_1, 0), eax);
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ mov_w(Operand(ecx, ebx, times_2, 0), eax);
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray: {
@ -903,21 +883,20 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// This test would apparently detect both NaN and Infinity,
// but we've already checked for NaN using the FPU hardware
// above.
__ mov_w(edi, FieldOperand(edx, HeapNumber::kValueOffset + 6));
__ and_(edi, 0x7FF0);
__ cmp(edi, 0x7FF0);
__ mov_w(edx, FieldOperand(eax, HeapNumber::kValueOffset + 6));
__ and_(edx, 0x7FF0);
__ cmp(edx, 0x7FF0);
__ j(not_equal, &not_infinity);
__ mov(eax, 0);
__ mov(ecx, 0);
__ bind(&not_infinity);
__ mov(Operand(ecx, ebx, times_4, 0), eax);
__ mov(Operand(edi, ebx, times_4, 0), ecx);
break;
}
default:
UNREACHABLE();
break;
}
__ mov(eax, edx); // Return the original value.
__ ret(0);
__ ret(0); // Return original value.
__ bind(&is_nan);
__ ffree();
@ -925,23 +904,22 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ mov_b(Operand(ecx, ebx, times_1, 0), 0);
__ mov_b(Operand(edi, ebx, times_1, 0), 0);
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ mov(eax, 0);
__ mov_w(Operand(ecx, ebx, times_2, 0), eax);
__ xor_(ecx, Operand(ecx));
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ mov(Operand(ecx, ebx, times_4, 0), Immediate(0));
__ mov(Operand(edi, ebx, times_4, 0), Immediate(0));
break;
default:
UNREACHABLE();
break;
}
__ mov(eax, edx); // Return the original value.
__ ret(0);
__ ret(0); // Return the original value.
}
// Slow case: call runtime.
@ -1262,7 +1240,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ push(ebx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
__ TailCallExternalReference(ref, 2, 1);
}
@ -1377,7 +1356,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ push(ebx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
__ TailCallExternalReference(ref, 2, 1);
}
@ -1394,7 +1374,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ push(ebx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(ExternalReference(Runtime::kKeyedGetProperty), 2, 1);
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
@ -1431,7 +1411,8 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
__ push(ebx);
// Perform tail call to the entry.
__ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
__ TailCallExternalReference(ref, 3, 1);
}
@ -1478,7 +1459,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ push(value);
__ push(scratch); // return address
__ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_ArrayLength)), 2, 1);
ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
__ TailCallExternalReference(ref, 2, 1);
__ bind(&miss);
@ -1492,38 +1474,39 @@ Object* KeyedStoreIC_Miss(Arguments args);
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -- esp[4] : key
// -- esp[8] : receiver
// -----------------------------------
__ pop(ecx);
__ push(Operand(esp, 1 * kPointerSize));
__ push(Operand(esp, 1 * kPointerSize));
__ push(eax);
__ pop(ebx);
__ push(edx);
__ push(ecx);
__ push(eax);
__ push(ebx);
// Do tail-call to runtime routine.
__ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -- esp[4] : key
// -- esp[8] : receiver
// -----------------------------------
__ pop(ecx);
__ push(Operand(esp, 1 * kPointerSize));
__ push(Operand(esp, 1 * kPointerSize));
__ push(eax);
__ pop(ebx);
__ push(edx);
__ push(ecx);
__ push(eax);
__ push(ebx);
// Do tail-call to runtime routine.
__ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
__ TailCallExternalReference(ref, 3, 1);
}
#undef __

57
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -164,7 +164,10 @@ void MacroAssembler::RecordWrite(Register object, int offset,
if (Serializer::enabled()) {
// Can't do arithmetic on external references if it might get serialized.
mov(value, Operand(object));
and_(value, Heap::NewSpaceMask());
// The mask isn't really an address. We load it as an external reference in
// case the size of the new space is different between the snapshot maker
// and the running system.
and_(Operand(value), Immediate(ExternalReference::new_space_mask()));
cmp(Operand(value), Immediate(ExternalReference::new_space_start()));
j(equal, &done);
} else {
@ -1186,15 +1189,22 @@ Object* MacroAssembler::TryCallRuntime(Runtime::Function* f,
}
void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
int num_arguments,
int result_size) {
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size) {
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
Set(eax, Immediate(num_arguments));
JumpToRuntime(ext);
JumpToExternalReference(ext);
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
}
@ -1264,7 +1274,7 @@ Object* MacroAssembler::TryPopHandleScope(Register saved, Register scratch) {
}
void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
CEntryStub ces(1);
@ -1615,6 +1625,41 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
}
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frameAlignment = OS::ActivationFrameAlignment();
if (frameAlignment != 0) {
// Make stack end at alignment and make room for num_arguments words
// and the original value of esp.
mov(scratch, esp);
sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
ASSERT(IsPowerOf2(frameAlignment));
and_(esp, -frameAlignment);
mov(Operand(esp, num_arguments * kPointerSize), scratch);
} else {
sub(Operand(esp), Immediate(num_arguments * kPointerSize));
}
}
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
// Trashing eax is ok as it will be the return value.
mov(Operand(eax), Immediate(function));
CallCFunction(eax, num_arguments);
}
void MacroAssembler::CallCFunction(Register function,
int num_arguments) {
call(Operand(function));
if (OS::ActivationFrameAlignment() != 0) {
mov(esp, Operand(esp, num_arguments * kPointerSize));
} else {
add(Operand(esp), Immediate(num_arguments * sizeof(int32_t)));
}
}
CodePatcher::CodePatcher(byte* address, int size)
: address_(address), size_(size), masm_(address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.

31
deps/v8/src/ia32/macro-assembler-ia32.h

@ -349,7 +349,6 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
// Eventually this should be used for all C calls.
void CallRuntime(Runtime::Function* f, int num_arguments);
// Call a runtime function, returning the CodeStub object called.
@ -367,12 +366,34 @@ class MacroAssembler: public Assembler {
Object* TryCallRuntime(Runtime::FunctionId id, int num_arguments);
// Tail call of a runtime routine (jump).
// Like JumpToRuntime, but also takes care of passing the number
// of arguments.
void TailCallRuntime(const ExternalReference& ext,
// Like JumpToExternalReference, but also takes care of passing the number
// of parameters.
void TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size);
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
// etc., not pushed. The argument count assumes all arguments are word sized.
// Some compilers/platforms require the stack to be aligned when calling
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
void PrepareCallCFunction(int num_arguments, Register scratch);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
void PushHandleScope(Register scratch);
// Pops a handle scope using the specified scratch register and
@ -384,7 +405,7 @@ class MacroAssembler: public Assembler {
Object* TryPopHandleScope(Register saved, Register scratch);
// Jump to a runtime routine.
void JumpToRuntime(const ExternalReference& ext);
void JumpToExternalReference(const ExternalReference& ext);
// ---------------------------------------------------------------------------

49
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@ -324,8 +324,8 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ push(backtrack_stackpointer());
__ push(ebx);
const int argument_count = 3;
FrameAlign(argument_count, ecx);
static const int argument_count = 3;
__ PrepareCallCFunction(argument_count, ecx);
// Put arguments into allocated stack area, last argument highest on stack.
// Parameters are
// Address byte_offset1 - Address captured substring's start.
@ -346,7 +346,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
ExternalReference compare =
ExternalReference::re_case_insensitive_compare_uc16();
CallCFunction(compare, argument_count);
__ CallCFunction(compare, argument_count);
// Pop original values before reacting on result value.
__ pop(ebx);
__ pop(backtrack_stackpointer());
@ -784,13 +784,13 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(edi);
// Call GrowStack(backtrack_stackpointer())
int num_arguments = 2;
FrameAlign(num_arguments, ebx);
static const int num_arguments = 2;
__ PrepareCallCFunction(num_arguments, ebx);
__ lea(eax, Operand(ebp, kStackHighEnd));
__ mov(Operand(esp, 1 * kPointerSize), eax);
__ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
ExternalReference grow_stack = ExternalReference::re_grow_stack();
CallCFunction(grow_stack, num_arguments);
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ or_(eax, Operand(eax));
@ -951,8 +951,8 @@ void RegExpMacroAssemblerIA32::WriteStackPointerToRegister(int reg) {
// Private methods:
void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
int num_arguments = 3;
FrameAlign(num_arguments, scratch);
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, scratch);
// RegExp code frame pointer.
__ mov(Operand(esp, 2 * kPointerSize), ebp);
// Code* of self.
@ -962,7 +962,7 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
__ mov(Operand(esp, 0 * kPointerSize), eax);
ExternalReference check_stack_guard =
ExternalReference::re_check_stack_guard_state();
CallCFunction(check_stack_guard, num_arguments);
__ CallCFunction(check_stack_guard, num_arguments);
}
@ -1153,37 +1153,6 @@ void RegExpMacroAssemblerIA32::CheckStackLimit() {
}
void RegExpMacroAssemblerIA32::FrameAlign(int num_arguments, Register scratch) {
// TODO(lrn): Since we no longer use the system stack arbitrarily (but we do
// use it, e.g., for SafeCall), we know the number of elements on the stack
// since the last frame alignment. We might be able to do this simpler then.
int frameAlignment = OS::ActivationFrameAlignment();
if (frameAlignment != 0) {
// Make stack end at alignment and make room for num_arguments words
// and the original value of esp.
__ mov(scratch, esp);
__ sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
ASSERT(IsPowerOf2(frameAlignment));
__ and_(esp, -frameAlignment);
__ mov(Operand(esp, num_arguments * kPointerSize), scratch);
} else {
__ sub(Operand(esp), Immediate(num_arguments * kPointerSize));
}
}
void RegExpMacroAssemblerIA32::CallCFunction(ExternalReference function,
int num_arguments) {
__ mov(Operand(eax), Immediate(function));
__ call(Operand(eax));
if (OS::ActivationFrameAlignment() != 0) {
__ mov(esp, Operand(esp, num_arguments * kPointerSize));
} else {
__ add(Operand(esp), Immediate(num_arguments * sizeof(int32_t)));
}
}
void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
if (mode_ == ASCII) {

15
deps/v8/src/ia32/regexp-macro-assembler-ia32.h

@ -187,21 +187,6 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
// (ecx) and increments it by a word size.
inline void Pop(Register target);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, arguments must be stored in esp[0], esp[4],
// etc., not pushed. The argument count assumes all arguments are word sized.
// Some compilers/platforms require the stack to be aligned when calling
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
inline void FrameAlign(int num_arguments, Register scratch);
// Calls a C function and cleans up the space for arguments allocated
// by FrameAlign. The called function is not allowed to trigger a garbage
// collection, since that might move the code and invalidate the return
// address (unless this is somehow accounted for).
inline void CallCFunction(ExternalReference function, int num_arguments);
MacroAssembler* masm_;
// Which mode to generate code for (ASCII or UC16).

1
deps/v8/src/ia32/register-allocator-ia32.cc

@ -49,6 +49,7 @@ void Result::ToRegister() {
Immediate(handle()));
}
// This result becomes a copy of the fresh one.
fresh.set_number_info(number_info());
*this = fresh;
}
ASSERT(is_register());

19
deps/v8/src/ia32/stub-cache-ia32.cc

@ -446,7 +446,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(ref, 5, 1);
__ TailCallExternalReference(ref, 5, 1);
__ bind(&cleanup);
__ pop(scratch1);
@ -468,7 +468,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
__ TailCallRuntime(ref, 5, 1);
__ TailCallExternalReference(ref, 5, 1);
}
private:
@ -907,7 +907,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ push(Immediate(Handle<Map>(transition)));
__ push(eax);
__ push(scratch);
__ TailCallRuntime(
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)), 3, 1);
return;
}
@ -1589,7 +1589,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
__ TailCallRuntime(store_callback_property, 4, 1);
__ TailCallExternalReference(store_callback_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
@ -1638,7 +1638,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
__ TailCallRuntime(store_ic_property, 3, 1);
__ TailCallExternalReference(store_ic_property, 3, 1);
// Handle store cache miss.
__ bind(&miss);
@ -1691,23 +1691,18 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
String* name) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -- esp[4] : key
// -- esp[8] : receiver
// -----------------------------------
Label miss;
__ IncrementCounter(&Counters::keyed_store_field, 1);
// Get the name from the stack.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
// Check that the name has not changed.
__ cmp(Operand(ecx), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
// Get the object from the stack.
__ mov(edx, Operand(esp, 2 * kPointerSize));
// Generate store field code. Trashes the name register.
GenerateStoreField(masm(),
object,

154
deps/v8/src/ia32/virtual-frame-ia32.cc

@ -37,23 +37,6 @@ namespace internal {
#define __ ACCESS_MASM(masm())
// -------------------------------------------------------------------------
// VirtualFrame implementation.
// On entry to a function, the virtual frame already contains the receiver,
// the parameters, and a return address. All frame elements are in memory.
VirtualFrame::VirtualFrame()
: elements_(parameter_count() + local_count() + kPreallocatedElements),
stack_pointer_(parameter_count() + 1) { // 0-based index of TOS.
for (int i = 0; i <= stack_pointer_; i++) {
elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
}
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
register_locations_[i] = kIllegalIndex;
}
}
void VirtualFrame::SyncElementBelowStackPointer(int index) {
// Emit code to write elements below the stack pointer to their
// (already allocated) stack address.
@ -179,7 +162,7 @@ void VirtualFrame::MakeMergable() {
if (element.is_constant() || element.is_copy()) {
if (element.is_synced()) {
// Just spill.
elements_[i] = FrameElement::MemoryElement(NumberInfo::kUnknown);
elements_[i] = FrameElement::MemoryElement(NumberInfo::Unknown());
} else {
// Allocate to a register.
FrameElement backing_element; // Invalid if not a copy.
@ -191,7 +174,7 @@ void VirtualFrame::MakeMergable() {
elements_[i] =
FrameElement::RegisterElement(fresh.reg(),
FrameElement::NOT_SYNCED,
NumberInfo::kUnknown);
NumberInfo::Unknown());
Use(fresh.reg(), i);
// Emit a move.
@ -224,7 +207,7 @@ void VirtualFrame::MakeMergable() {
// The copy flag is not relied on before the end of this loop,
// including when registers are spilled.
elements_[i].clear_copied();
elements_[i].set_number_info(NumberInfo::kUnknown);
elements_[i].set_number_info(NumberInfo::Unknown());
}
}
}
@ -896,30 +879,39 @@ Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
}
// This function assumes that the only results that could be in a_reg or b_reg
// are a and b. Other results can be live, but must not be in a_reg or b_reg.
void VirtualFrame::MoveResultsToRegisters(Result* a,
Result* b,
Register a_reg,
Register b_reg) {
if (a->is_register() && a->reg().is(a_reg)) {
b->ToRegister(b_reg);
} else if (!cgen()->allocator()->is_used(a_reg)) {
a->ToRegister(a_reg);
b->ToRegister(b_reg);
} else if (cgen()->allocator()->is_used(b_reg)) {
// a must be in b_reg, b in a_reg.
__ xchg(a_reg, b_reg);
// Results a and b will be invalidated, so it is ok if they are switched.
} else {
b->ToRegister(b_reg);
a->ToRegister(a_reg);
}
a->Unuse();
b->Unuse();
}
Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
// Name and receiver are on the top of the frame. The IC expects
// name in ecx and receiver in eax.
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
Result name = Pop();
Result receiver = Pop();
PrepareForCall(0, 0); // No stack arguments.
// Move results to the right registers:
if (name.is_register() && name.reg().is(eax)) {
if (receiver.is_register() && receiver.reg().is(ecx)) {
// Wrong registers.
__ xchg(eax, ecx);
} else {
// Register ecx is free for name, which frees eax for receiver.
name.ToRegister(ecx);
receiver.ToRegister(eax);
}
} else {
// Register eax is free for receiver, which frees ecx for name.
receiver.ToRegister(eax);
name.ToRegister(ecx);
}
name.Unuse();
receiver.Unuse();
MoveResultsToRegisters(&name, &receiver, ecx, eax);
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
return RawCallCodeObject(ic, mode);
}
@ -929,20 +921,7 @@ Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
Result key = Pop();
Result receiver = Pop();
PrepareForCall(0, 0);
if (!key.is_register() || !key.reg().is(edx)) {
// Register edx is available for receiver.
receiver.ToRegister(edx);
key.ToRegister(eax);
} else if (!receiver.is_register() || !receiver.reg().is(eax)) {
// Register eax is available for key.
key.ToRegister(eax);
receiver.ToRegister(edx);
} else {
__ xchg(edx, eax);
}
key.Unuse();
receiver.Unuse();
MoveResultsToRegisters(&key, &receiver, eax, edx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
return RawCallCodeObject(ic, mode);
@ -958,42 +937,57 @@ Result VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
PrepareForCall(0, 0);
value.ToRegister(eax);
__ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(ecx, name);
value.Unuse();
} else {
Result receiver = Pop();
PrepareForCall(0, 0);
if (value.is_register() && value.reg().is(edx)) {
if (receiver.is_register() && receiver.reg().is(eax)) {
// Wrong registers.
__ xchg(eax, edx);
} else {
// Register eax is free for value, which frees edx for receiver.
value.ToRegister(eax);
receiver.ToRegister(edx);
}
} else {
// Register edx is free for receiver, which guarantees eax is free for
// value.
receiver.ToRegister(edx);
value.ToRegister(eax);
}
MoveResultsToRegisters(&value, &receiver, eax, edx);
}
__ mov(ecx, name);
value.Unuse();
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
Result VirtualFrame::CallKeyedStoreIC() {
// Value, key, and receiver are on the top of the frame. The IC
// expects value in eax and key and receiver on the stack. It does
// not drop the key and receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
// expects value in eax, key in ecx, and receiver in edx.
Result value = Pop();
PrepareForCall(2, 0); // Two stack args, neither callee-dropped.
value.ToRegister(eax);
value.Unuse();
Result key = Pop();
Result receiver = Pop();
PrepareForCall(0, 0);
if (!cgen()->allocator()->is_used(eax) ||
(value.is_register() && value.reg().is(eax))) {
value.ToRegister(eax); // No effect if value is in eax already.
MoveResultsToRegisters(&key, &receiver, ecx, edx);
value.Unuse();
} else if (!cgen()->allocator()->is_used(ecx) ||
(key.is_register() && key.reg().is(ecx))) {
// Receiver and/or key are in eax.
key.ToRegister(ecx);
MoveResultsToRegisters(&value, &receiver, eax, edx);
key.Unuse();
} else if (!cgen()->allocator()->is_used(edx) ||
(receiver.is_register() && receiver.reg().is(edx))) {
receiver.ToRegister(edx);
MoveResultsToRegisters(&key, &value, ecx, eax);
receiver.Unuse();
} else {
// All three registers are used, and no value is in the correct place.
// We have one of the two circular permutations of eax, ecx, edx.
ASSERT(value.is_register());
if (value.reg().is(ecx)) {
__ xchg(eax, edx);
__ xchg(eax, ecx);
} else {
__ xchg(eax, ecx);
__ xchg(eax, edx);
}
value.Unuse();
key.Unuse();
receiver.Unuse();
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
@ -1068,7 +1062,7 @@ Result VirtualFrame::Pop() {
ASSERT(element.is_valid());
// Get number type information of the result.
NumberInfo::Type info;
NumberInfo info;
if (!element.is_copy()) {
info = element.number_info();
} else {
@ -1143,7 +1137,7 @@ void VirtualFrame::EmitPop(Operand operand) {
}
void VirtualFrame::EmitPush(Register reg, NumberInfo::Type info) {
void VirtualFrame::EmitPush(Register reg, NumberInfo info) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
@ -1151,7 +1145,7 @@ void VirtualFrame::EmitPush(Register reg, NumberInfo::Type info) {
}
void VirtualFrame::EmitPush(Operand operand, NumberInfo::Type info) {
void VirtualFrame::EmitPush(Operand operand, NumberInfo info) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;
@ -1159,7 +1153,7 @@ void VirtualFrame::EmitPush(Operand operand, NumberInfo::Type info) {
}
void VirtualFrame::EmitPush(Immediate immediate, NumberInfo::Type info) {
void VirtualFrame::EmitPush(Immediate immediate, NumberInfo info) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement(info));
stack_pointer_++;

20
deps/v8/src/ia32/virtual-frame-ia32.h

@ -73,7 +73,7 @@ class VirtualFrame: public ZoneObject {
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
VirtualFrame();
inline VirtualFrame();
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
@ -84,7 +84,7 @@ class VirtualFrame: public ZoneObject {
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index,
NumberInfo::Type info = NumberInfo::kUninitialized);
NumberInfo info = NumberInfo::Uninitialized());
// The number of elements on the virtual frame.
int element_count() { return elements_.length(); }
@ -388,14 +388,14 @@ class VirtualFrame: public ZoneObject {
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
void EmitPush(Register reg,
NumberInfo::Type info = NumberInfo::kUnknown);
NumberInfo info = NumberInfo::Unknown());
void EmitPush(Operand operand,
NumberInfo::Type info = NumberInfo::kUnknown);
NumberInfo info = NumberInfo::Unknown());
void EmitPush(Immediate immediate,
NumberInfo::Type info = NumberInfo::kUnknown);
NumberInfo info = NumberInfo::Unknown());
// Push an element on the virtual frame.
inline void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
inline void Push(Handle<Object> value);
inline void Push(Smi* value);
@ -571,6 +571,14 @@ class VirtualFrame: public ZoneObject {
// Register counts are correctly updated.
int InvalidateFrameSlotAt(int index);
// This function assumes that a and b are the only results that could be in
// the registers a_reg or b_reg. Other results can be live, but must not
// be in the registers a_reg or b_reg. The results a and b are invalidated.
void MoveResultsToRegisters(Result* a,
Result* b,
Register a_reg,
Register b_reg);
// Call a code stub that has already been prepared for calling (via
// PrepareForCall).
Result RawCallStub(CodeStub* stub);

145
deps/v8/src/ic.cc

@ -63,7 +63,9 @@ void IC::TraceIC(const char* type,
Code* new_target,
const char* extra_info) {
if (FLAG_trace_ic) {
State new_state = StateFrom(new_target, Heap::undefined_value());
State new_state = StateFrom(new_target,
Heap::undefined_value(),
Heap::undefined_value());
PrintF("[%s (%c->%c)%s", type,
TransitionMarkFromState(old_state),
TransitionMarkFromState(new_state),
@ -132,7 +134,7 @@ Address IC::OriginalCodeAddress() {
}
#endif
IC::State IC::StateFrom(Code* target, Object* receiver) {
IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
IC::State state = target->ic_state();
if (state != MONOMORPHIC) return state;
@ -148,7 +150,7 @@ IC::State IC::StateFrom(Code* target, Object* receiver) {
// the receiver map's code cache. Therefore, if the current target
// is in the receiver map's code cache, the inline cache failed due
// to prototype check failure.
int index = map->IndexInCodeCache(target);
int index = map->IndexInCodeCache(String::cast(name), target);
if (index >= 0) {
// For keyed load/store, the most likely cause of cache failure is
// that the key has changed. We do not distinguish between
@ -160,7 +162,7 @@ IC::State IC::StateFrom(Code* target, Object* receiver) {
// Remove the target from the code cache to avoid hitting the same
// invalid stub again.
map->RemoveFromCodeCache(index);
map->RemoveFromCodeCache(String::cast(name), target, index);
return MONOMORPHIC_PROTOTYPE_FAILURE;
}
@ -222,6 +224,7 @@ void IC::Clear(Address address) {
case Code::STORE_IC: return StoreIC::Clear(address, target);
case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
case Code::CALL_IC: return CallIC::Clear(address, target);
case Code::BINARY_OP_IC: return BinaryOpIC::Clear(address, target);
default: UNREACHABLE();
}
}
@ -1049,6 +1052,20 @@ Object* StoreIC::Store(State state,
return *value;
}
// Use specialized code for setting the length of arrays.
if (receiver->IsJSArray()
&& name->Equals(Heap::length_symbol())
&& receiver->AllowsSetElementsLength()) {
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
#endif
Code* target = Builtins::builtin(Builtins::StoreIC_ArrayLength);
set_target(target);
StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
return receiver->SetProperty(*name, *value, NONE);
}
// Lookup the property locally in the receiver.
if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
LookupResult lookup;
@ -1285,7 +1302,7 @@ Object* CallIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
CallIC ic;
IC::State state = IC::StateFrom(ic.target(), args[0]);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Object* result =
ic.LoadFunction(state, args.at<Object>(0), args.at<String>(1));
@ -1318,7 +1335,7 @@ Object* LoadIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
LoadIC ic;
IC::State state = IC::StateFrom(ic.target(), args[0]);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
return ic.Load(state, args.at<Object>(0), args.at<String>(1));
}
@ -1328,7 +1345,7 @@ Object* KeyedLoadIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 2);
KeyedLoadIC ic;
IC::State state = IC::StateFrom(ic.target(), args[0]);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
return ic.Load(state, args.at<Object>(0), args.at<Object>(1));
}
@ -1338,7 +1355,7 @@ Object* StoreIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
StoreIC ic;
IC::State state = IC::StateFrom(ic.target(), args[0]);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
return ic.Store(state, args.at<Object>(0), args.at<String>(1),
args.at<Object>(2));
}
@ -1351,7 +1368,9 @@ Object* StoreIC_ArrayLength(Arguments args) {
JSObject* receiver = JSObject::cast(args[0]);
Object* len = args[1];
return receiver->SetElementsLength(len);
Object* result = receiver->SetElementsLength(len);
if (result->IsFailure()) return result;
return len;
}
@ -1394,12 +1413,118 @@ Object* KeyedStoreIC_Miss(Arguments args) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
KeyedStoreIC ic;
IC::State state = IC::StateFrom(ic.target(), args[0]);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
return ic.Store(state, args.at<Object>(0), args.at<Object>(1),
args.at<Object>(2));
}
void BinaryOpIC::patch(Code* code) {
set_target(code);
}
void BinaryOpIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
// At the end of a fast case stub there should be a reference to
// a corresponding UNINITIALIZED stub, so look for the last reloc info item.
RelocInfo* rinfo = NULL;
for (RelocIterator it(target, RelocInfo::kCodeTargetMask);
!it.done(); it.next()) {
rinfo = it.rinfo();
}
ASSERT(rinfo != NULL);
Code* uninit_stub = Code::GetCodeFromTargetAddress(rinfo->target_address());
ASSERT(uninit_stub->ic_state() == UNINITIALIZED &&
uninit_stub->kind() == Code::BINARY_OP_IC);
SetTargetAtAddress(address, uninit_stub);
}
const char* BinaryOpIC::GetName(TypeInfo type_info) {
switch (type_info) {
case DEFAULT: return "Default";
case GENERIC: return "Generic";
case HEAP_NUMBERS: return "HeapNumbers";
case STRINGS: return "Strings";
default: return "Invalid";
}
}
BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
switch (type_info) {
// DEFAULT is mapped to UNINITIALIZED so that calls to DEFAULT stubs
// are not cleared at GC.
case DEFAULT: return UNINITIALIZED;
// Could have mapped GENERIC to MONOMORPHIC just as well but MEGAMORPHIC is
// conceptually closer.
case GENERIC: return MEGAMORPHIC;
default: return MONOMORPHIC;
}
}
BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Object* left,
Object* right) {
// Patching is never requested for the two smis.
ASSERT(!left->IsSmi() || !right->IsSmi());
if (left->IsNumber() && right->IsNumber()) {
return HEAP_NUMBERS;
}
if (left->IsString() || right->IsString()) {
// Patching for fast string ADD makes sense even if only one of the
// arguments is a string.
return STRINGS;
}
return GENERIC;
}
// defined in codegen-<arch>.cc
Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info);
// Runtime entry: selects a specialized binary-op stub from the observed
// operand types and patches the calling code to use it. Returns the result
// that the caller already computed via the generic path.
Object* BinaryOp_Patch(Arguments args) {
  ASSERT(args.length() == 6);

  // args: left operand, right operand, precomputed result, stub key,
  // operation token (debug only), previous type info (debug only).
  Handle<Object> left = args.at<Object>(0);
  Handle<Object> right = args.at<Object>(1);
  Handle<Object> result = args.at<Object>(2);
  int key = Smi::cast(args[3])->value();
#ifdef DEBUG
  Token::Value op = static_cast<Token::Value>(Smi::cast(args[4])->value());
  BinaryOpIC::TypeInfo prev_type_info =
      static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[5])->value());
#endif  // DEBUG
  { HandleScope scope;
    BinaryOpIC::TypeInfo type_info = BinaryOpIC::GetTypeInfo(*left, *right);
    Handle<Code> code = GetBinaryOpStub(key, type_info);
    if (!code.is_null()) {
      BinaryOpIC ic;
      ic.patch(*code);
#ifdef DEBUG
      if (FLAG_trace_ic) {
        PrintF("[BinaryOpIC (%s->%s)#%s]\n",
               BinaryOpIC::GetName(prev_type_info),
               BinaryOpIC::GetName(type_info),
               Token::Name(op));
      }
#endif  // DEBUG
    }
  }

  // The operation itself was already performed by the caller.
  return *result;
}
static Address IC_utilities[] = {
#define ADDR(name) FUNCTION_ADDR(name),
IC_UTIL_LIST(ADDR)

31
deps/v8/src/ic.h

@ -55,7 +55,8 @@ enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
ICU(LoadPropertyWithInterceptorForLoad) \
ICU(LoadPropertyWithInterceptorForCall) \
ICU(KeyedLoadPropertyWithInterceptor) \
ICU(StoreInterceptorProperty)
ICU(StoreInterceptorProperty) \
ICU(BinaryOp_Patch)
//
// IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
@ -93,8 +94,8 @@ class IC {
Code* target() { return GetTargetAtAddress(address()); }
inline Address address();
// Compute the current IC state based on the target stub and the receiver.
static State StateFrom(Code* target, Object* receiver);
// Compute the current IC state based on the target stub, receiver and name.
static State StateFrom(Code* target, Object* receiver, Object* name);
// Clear the inline cache to initial state.
static void Clear(Address address);
@ -444,6 +445,30 @@ class KeyedStoreIC: public IC {
};
// Inline cache for binary operations; patches call sites with stubs
// specialized on the operand types observed at run time.
class BinaryOpIC: public IC {
 public:
  // Discriminates the specialized stub variants.
  enum TypeInfo {
    DEFAULT,  // Initial state. When first executed, patches to one
              // of the following states depending on the operands types.
    HEAP_NUMBERS,  // Both arguments are HeapNumbers.
    STRINGS,  // At least one of the arguments is String.
    GENERIC   // Non-specialized case (processes any type combination).
  };

  BinaryOpIC() : IC(NO_EXTRA_FRAME) { }

  // Overwrites the current call target with the given stub.
  void patch(Code* code);

  // Resets a call site back to the UNINITIALIZED stub (used at GC time).
  static void Clear(Address address, Code* target);

  // Human-readable stub-state name (for tracing).
  static const char* GetName(TypeInfo type_info);

  static State ToState(TypeInfo type_info);

  // Chooses the stub variant matching the operands' runtime types.
  static TypeInfo GetTypeInfo(Object* left, Object* right);
};
} } // namespace v8::internal
#endif // V8_IC_H_

1
deps/v8/src/jsregexp.h

@ -29,6 +29,7 @@
#define V8_JSREGEXP_H_
#include "macro-assembler.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {

2
deps/v8/src/jump-target-inl.h

@ -46,7 +46,7 @@ void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
entry_frame_->elements_[target->index()].set_copied();
}
if (direction_ == BIDIRECTIONAL && !target->is_copy()) {
element->set_number_info(NumberInfo::kUnknown);
element->set_number_info(NumberInfo::Unknown());
}
}

13
deps/v8/src/jump-target.cc

@ -135,7 +135,7 @@ void JumpTarget::ComputeEntryFrame() {
FrameElement* target = elements[index];
if (target == NULL) {
entry_frame_->elements_.Add(
FrameElement::MemoryElement(NumberInfo::kUninitialized));
FrameElement::MemoryElement(NumberInfo::Uninitialized()));
} else {
entry_frame_->elements_.Add(*target);
InitializeEntryElement(index, target);
@ -152,12 +152,12 @@ void JumpTarget::ComputeEntryFrame() {
RegisterFile candidate_registers;
int best_count = kMinInt;
int best_reg_num = RegisterAllocator::kInvalidRegister;
NumberInfo::Type info = NumberInfo::kUninitialized;
NumberInfo info = NumberInfo::Uninitialized();
for (int j = 0; j < reaching_frames_.length(); j++) {
FrameElement element = reaching_frames_[j]->elements_[i];
if (direction_ == BIDIRECTIONAL) {
info = NumberInfo::kUnknown;
info = NumberInfo::Unknown();
} else if (!element.is_copy()) {
info = NumberInfo::Combine(info, element.number_info());
} else {
@ -181,7 +181,7 @@ void JumpTarget::ComputeEntryFrame() {
// We must have a number type information now (not for copied elements).
ASSERT(entry_frame_->elements_[i].is_copy()
|| info != NumberInfo::kUninitialized);
|| !info.IsUninitialized());
// If the value is synced on all frames, put it in memory. This
// costs nothing at the merge code but will incur a
@ -211,7 +211,7 @@ void JumpTarget::ComputeEntryFrame() {
Register reg = RegisterAllocator::ToRegister(best_reg_num);
entry_frame_->elements_[i] =
FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED,
NumberInfo::kUninitialized);
NumberInfo::Uninitialized());
if (is_copied) entry_frame_->elements_[i].set_copied();
entry_frame_->set_register_location(reg, i);
}
@ -225,8 +225,7 @@ void JumpTarget::ComputeEntryFrame() {
if (direction_ == BIDIRECTIONAL) {
for (int i = 0; i < length; ++i) {
if (!entry_frame_->elements_[i].is_copy()) {
ASSERT(entry_frame_->elements_[i].number_info() ==
NumberInfo::kUnknown);
ASSERT(entry_frame_->elements_[i].number_info().IsUnknown());
}
}
}

1
deps/v8/src/jump-target.h

@ -29,6 +29,7 @@
#define V8_JUMP_TARGET_H_
#include "macro-assembler.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {

426
deps/v8/src/liveedit-delay.js

@ -0,0 +1,426 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// LiveEdit feature implementation. The script should be executed after
// debug-delay.js.
// Changes script text and recompiles all relevant functions if possible.
// The change is always a substring (change_pos, change_pos + change_len)
// being replaced with a completely different string new_str.
//
// Only one function will have its Code changed in result of this function.
// All nested functions (should they have any instances at the moment) are left
// unchanged and re-linked to a newly created script instance representing old
// version of the source. (Generally speaking,
// during the change all nested functions are erased and completely different
// set of nested functions are introduced.) All other functions just have
// their positions updated.
//
// @param {Script} script that is being changed
// @param {Array} change_log a list that collects engineer-readable description
// of what happened.
Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
                                      change_log) {

  // So far the function works as namespace.
  var liveedit = Debug.LiveEditChangeScript;
  var Assert = liveedit.Assert;

  // Fully compiles source string as a script. Returns Array of
  // FunctionCompileInfo -- descriptions of all functions of the script.
  // Elements of array are ordered by start positions of functions (from top
  // to bottom) in the source. Fields outer_index and next_sibling_index help
  // to navigate the nesting structure of functions.
  //
  // The script is used for compilation, because it produces code that
  // needs to be linked with some particular script (for nested functions).
  function DebugGatherCompileInfo(source) {
    // Get function info, elements are partially sorted (it is a tree
    // of nested functions serialized as parent followed by serialized
    // children).
    var raw_compile_info = %LiveEditGatherCompileInfo(script, source);

    // Sort function infos by start position field (selection sort);
    // old_index_map mirrors every move so that the original tree indexes
    // can be fixed up afterwards.
    var compile_info = new Array();
    var old_index_map = new Array();
    for (var i = 0; i < raw_compile_info.length; i++) {
      compile_info.push(new liveedit.FunctionCompileInfo(raw_compile_info[i]));
      old_index_map.push(i);
    }
    for (var i = 0; i < compile_info.length; i++) {
      var k = i;
      for (var j = i + 1; j < compile_info.length; j++) {
        if (compile_info[k].start_position > compile_info[j].start_position) {
          k = j;
        }
      }
      if (k != i) {
        var temp_info = compile_info[k];
        var temp_index = old_index_map[k];
        compile_info[k] = compile_info[i];
        old_index_map[k] = old_index_map[i];
        compile_info[i] = temp_info;
        old_index_map[i] = temp_index;
      }
    }

    // After sorting update outer_index field using old_index_map. Also
    // set next_sibling_index field.
    var current_index = 0;

    // The recursive function, that goes over all children of a particular
    // node (i.e. function info).
    function ResetIndexes(new_parent_index, old_parent_index) {
      var previous_sibling = -1;
      while (current_index < compile_info.length &&
          compile_info[current_index].outer_index == old_parent_index) {
        var saved_index = current_index;
        compile_info[saved_index].outer_index = new_parent_index;
        if (previous_sibling != -1) {
          compile_info[previous_sibling].next_sibling_index = saved_index;
        }
        previous_sibling = saved_index;
        current_index++;
        ResetIndexes(saved_index, old_index_map[saved_index]);
      }
      // Terminate the sibling chain of the last child.
      if (previous_sibling != -1) {
        compile_info[previous_sibling].next_sibling_index = -1;
      }
    }

    ResetIndexes(-1, -1);
    Assert(current_index == compile_info.length);

    return compile_info;
  }

  // Given a position, finds a function that fully includes the entire change.
  function FindChangedFunction(compile_info, offset, len) {
    // First condition: function should start before the change region.
    // Function #0 (whole-script function) always does, but we want
    // one, that is later in this list.
    var index = 0;
    while (index + 1 < compile_info.length &&
        compile_info[index + 1].start_position <= offset) {
      index++;
    }
    // Now we are at the last function that begins before the change
    // region. The function that covers entire change region is either
    // this function or the enclosing one.
    for (; compile_info[index].end_position < offset + len;
        index = compile_info[index].outer_index) {
      Assert(index != -1);
    }
    return index;
  }

  // Variable forward declarations. Preprocessor "Minifier" needs them.
  var old_compile_info;
  var shared_infos;
  // Finds SharedFunctionInfo that corresponds compile info with index
  // in old version of the script. Returns undefined when no wrapper with
  // matching source positions exists.
  function FindFunctionInfo(index) {
    var old_info = old_compile_info[index];
    for (var i = 0; i < shared_infos.length; i++) {
      var info = shared_infos[i];
      if (info.start_position == old_info.start_position &&
          info.end_position == old_info.end_position) {
        return info;
      }
    }
  }

  // Replaces function's Code.
  function PatchCode(new_info, shared_info) {
    %LiveEditReplaceFunctionCode(new_info.raw_array, shared_info.raw_array);

    change_log.push( {function_patched: new_info.function_name} );
  }

  var change_len_old;
  var change_len_new;
  // Translate position in old version of script into position in new
  // version of script.
  // NOTE(review): currently unused here -- positions are patched through
  // position_change_array and %LiveEditPatchFunctionPositions instead.
  function PosTranslator(old_pos) {
    if (old_pos <= change_pos) {
      return old_pos;
    }
    if (old_pos >= change_pos + change_len_old) {
      return old_pos + change_len_new - change_len_old;
    }
    return -1;
  }

  var position_change_array;
  var position_patch_report;
  // Shifts source positions of one function through the change region.
  function PatchPositions(new_info, shared_info) {
    if (!shared_info) {
      // TODO: explain what is happening.
      return;
    }
    %LiveEditPatchFunctionPositions(shared_info.raw_array,
        position_change_array);
    position_patch_report.push( { name: new_info.function_name } );
  }

  var link_to_old_script_report;
  var old_script;
  // Makes a function associated with another instance of a script (the
  // one representing its old version). This way the function still
  // may access its own text.
  function LinkToOldScript(shared_info) {
    %LiveEditRelinkFunctionToScript(shared_info.raw_array, old_script);

    link_to_old_script_report.push( { name: shared_info.function_name } );
  }

  var old_source = script.source;
  var change_len_old = change_len;
  var change_len_new = new_str.length;

  // Prepare new source string.
  var new_source = old_source.substring(0, change_pos) +
      new_str + old_source.substring(change_pos + change_len);

  // Find all SharedFunctionInfo's that are compiled from this script.
  var shared_raw_list = %LiveEditFindSharedFunctionInfosForScript(script);
  var shared_infos = new Array();
  for (var i = 0; i < shared_raw_list.length; i++) {
    shared_infos.push(new liveedit.SharedInfoWrapper(shared_raw_list[i]));
  }

  // Gather compile information about old version of script.
  var old_compile_info = DebugGatherCompileInfo(old_source);

  // Gather compile information about new version of script.
  var new_compile_info;
  try {
    new_compile_info = DebugGatherCompileInfo(new_source);
  } catch (e) {
    throw new liveedit.Failure("Failed to compile new version of script: " + e);
  }

  // An index of a single function, that is going to have its code replaced.
  var function_being_patched =
      FindChangedFunction(old_compile_info, change_pos, change_len_old);

  // In old and new script versions function with a change should have the
  // same indexes.
  var function_being_patched2 =
      FindChangedFunction(new_compile_info, change_pos, change_len_new);
  Assert(function_being_patched == function_being_patched2,
         "inconsistent old/new compile info");

  // Check that function being patched has the same expectations in a new
  // version. Otherwise we cannot safely patch its behavior and should
  // choose the outer function instead.
  while (!liveedit.CompareFunctionExpectations(
      old_compile_info[function_being_patched],
      new_compile_info[function_being_patched])) {

    Assert(old_compile_info[function_being_patched].outer_index ==
        new_compile_info[function_being_patched].outer_index);
    function_being_patched =
        old_compile_info[function_being_patched].outer_index;
    Assert(function_being_patched != -1);
  }

  // Check that function being patched is not currently on stack.
  liveedit.CheckStackActivations(
      [ FindFunctionInfo(function_being_patched) ], change_log );

  // Committing all changes.
  var old_script_name = liveedit.CreateNameForOldScript(script);

  // Update the script text and create a new script representing an old
  // version of the script.
  var old_script = %LiveEditReplaceScript(script, new_source, old_script_name);

  PatchCode(new_compile_info[function_being_patched],
      FindFunctionInfo(function_being_patched));

  var position_patch_report = new Array();
  change_log.push( {position_patched: position_patch_report} );

  var position_change_array = [ change_pos,
                                change_pos + change_len_old,
                                change_pos + change_len_new ];

  // Update positions of all outer functions (i.e. all functions, that
  // are partially below the function being patched).
  for (var i = new_compile_info[function_being_patched].outer_index;
       i != -1;
       i = new_compile_info[i].outer_index) {
    PatchPositions(new_compile_info[i], FindFunctionInfo(i));
  }

  // Update positions of all functions that are fully below the function
  // being patched.
  var old_next_sibling =
      old_compile_info[function_being_patched].next_sibling_index;
  var new_next_sibling =
      new_compile_info[function_being_patched].next_sibling_index;

  // We simply go over the tail of both old and new lists. Their tails should
  // have an identical structure.
  if (old_next_sibling == -1) {
    Assert(new_next_sibling == -1);
  } else {
    Assert(old_compile_info.length - old_next_sibling ==
        new_compile_info.length - new_next_sibling);

    for (var i = old_next_sibling, j = new_next_sibling;
        i < old_compile_info.length; i++, j++) {
      PatchPositions(new_compile_info[j], FindFunctionInfo(i));
    }
  }

  var link_to_old_script_report = new Array();
  change_log.push( { linked_to_old_script: link_to_old_script_report } );

  // We need to link to old script all former nested functions.
  // NOTE(review): LinkToOldScript is declared with a single parameter;
  // the extra old_script argument below is ignored.
  for (var i = function_being_patched + 1; i < old_next_sibling; i++) {
    LinkToOldScript(FindFunctionInfo(i), old_script);
  }
}
// Throws when the invariant does not hold; an optional message adds context.
Debug.LiveEditChangeScript.Assert = function(condition, message) {
  if (condition) {
    return;
  }
  throw message ? "Assert " + message : "Assert";
}
// An object describing function compilation details. Its index fields
// apply to indexes inside array that stores these objects.
Debug.LiveEditChangeScript.FunctionCompileInfo = function(raw_array) {
  this.function_name = raw_array[0];   // Name of the function.
  this.start_position = raw_array[1];  // Source position of function start.
  this.end_position = raw_array[2];    // Source position of function end.
  this.param_num = raw_array[3];       // Number of declared parameters.
  this.code = raw_array[4];            // Compiled code (wrapped object).
  this.scope_info = raw_array[5];      // Description of the outer scopes.
  this.outer_index = raw_array[6];     // Index of the enclosing function.
  this.next_sibling_index = null;      // Filled in later by ResetIndexes.
  this.raw_array = raw_array;          // Raw form, passed back to runtime calls.
}
// A structure describing SharedFunctionInfo.
Debug.LiveEditChangeScript.SharedInfoWrapper = function(raw_array) {
  this.function_name = raw_array[0];   // Name of the function.
  this.start_position = raw_array[1];  // Source position of function start.
  this.end_position = raw_array[2];    // Source position of function end.
  this.info = raw_array[3];            // The SharedFunctionInfo (wrapped).
  this.raw_array = raw_array;          // Raw form, passed back to runtime calls.
}
// Adds a suffix to script name to mark that it is old version.
// Derives the name for the script instance that keeps the old source text.
Debug.LiveEditChangeScript.CreateNameForOldScript = function(script) {
  // TODO(635): try better than this; support several changes.
  var old_version_suffix = " (old)";
  return script.name + old_version_suffix;
}
// Compares a function interface old and new version, whether it
// changed or not.
// Decides whether the new version of a function can safely replace the old
// one: same parameter count and same outer scope layout.
Debug.LiveEditChangeScript.CompareFunctionExpectations =
    function(function_info1, function_info2) {
  // A different parameter count breaks existing arguments adapters.
  if (function_info1.param_num != function_info2.param_num) {
    return false;
  }

  var scope1 = function_info1.scope_info;
  var scope2 = function_info2.scope_info;
  if (!scope1) {
    // Both must lack scope info for the functions to be compatible.
    return !scope2;
  }
  if (scope1.length != scope2.length) {
    return false;
  }

  // The outer scope structure must be unchanged, otherwise the function
  // will not properly work with existing scopes.
  return scope1.toString() == scope2.toString();
}
// For array of wrapped shared function infos checks that none of them
// have activations on stack (of any thread). Throws a Failure exception
// if this proves to be false.
Debug.LiveEditChangeScript.CheckStackActivations = function(shared_wrapper_list,
                                                            change_log) {
  var liveedit = Debug.LiveEditChangeScript;

  // Unwrap to the raw SharedFunctionInfo objects the runtime call expects.
  var shared_list = new Array();
  for (var i = 0; i < shared_wrapper_list.length; i++) {
    shared_list[i] = shared_wrapper_list[i].info;
  }
  var result = %LiveEditCheckStackActivations(shared_list);
  // Collect a description of every function found active on some stack.
  var problems = new Array();
  for (var i = 0; i < shared_list.length; i++) {
    if (result[i] == liveedit.FunctionPatchabilityStatus.FUNCTION_BLOCKED_ON_STACK) {
      var shared = shared_list[i];
      var description = {
          name: shared.function_name,
          start_pos: shared.start_position,
          end_pos: shared.end_position
      };
      problems.push(description);
    }
  }
  // Record the blockers in the change log and abort the whole operation.
  if (problems.length > 0) {
    change_log.push( { functions_on_stack: problems } );
    throw new liveedit.Failure("Blocked by functions on stack");
  }
}
// A copy of the FunctionPatchabilityStatus enum from liveedit.h
Debug.LiveEditChangeScript.FunctionPatchabilityStatus = {
  FUNCTION_AVAILABLE_FOR_PATCH: 0,  // Safe to patch in place.
  FUNCTION_BLOCKED_ON_STACK: 1      // Has an activation on some stack.
}
// A logical failure in liveedit process. This means that change_log
// is valid and consistent description of what happened.
Debug.LiveEditChangeScript.Failure = function(message) {
  // Engineer-readable description of why the liveedit operation failed.
  this.message = message;
}
// Renders the failure with a fixed prefix so logs are easy to grep.
Debug.LiveEditChangeScript.Failure.prototype.toString = function() {
  var prefix = "LiveEdit Failure: ";
  return prefix + this.message;
}

404
deps/v8/src/liveedit.cc

@ -39,49 +39,445 @@ namespace v8 {
namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Compiles the script as a top-level (non-eval) compilation purely for the
// side effect of driving the active FunctionInfoListener; the produced code
// is discarded. Errors surface as a pending exception on Top.
static void CompileScriptForTracker(Handle<Script> script) {
  const bool is_eval = false;
  const bool is_global = true;
  // TODO(635): support extensions.
  Extension* extension = NULL;

  PostponeInterruptsScope postpone;

  // Only allow non-global compiles for eval.
  ASSERT(is_eval || is_global);

  // Build AST.
  ScriptDataImpl* pre_data = NULL;
  FunctionLiteral* lit = MakeAST(is_global, script, extension, pre_data);

  // Check for parse errors.
  if (lit == NULL) {
    ASSERT(Top::has_pending_exception());
    return;
  }

  // Compile the code.
  CompilationInfo info(lit, script, is_eval);
  Handle<Code> code = MakeCodeForLiveEdit(&info);

  // Check for stack-overflow exceptions.
  if (code.is_null()) {
    Top::StackOverflow();
    return;
  }
}
// Unwraps JSValue object, returning its field "value"
// Unwraps a JSValue object, returning its hidden "value" field.
static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
  Object* raw_value = jsValue->value();
  return Handle<Object>(raw_value);
}
// Wraps any object into a OpaqueReference, that will hide the object
// from JavaScript.
// Wraps any object into an OpaqueReference JSValue so that JavaScript code
// can hold it without being able to inspect it.
static Handle<JSValue> WrapInJSValue(Object* object) {
  Handle<JSFunction> opaque_constructor = Top::opaque_reference_function();
  Handle<JSValue> holder =
      Handle<JSValue>::cast(Factory::NewJSObject(opaque_constructor));
  holder->set_value(object);
  return holder;
}
// Simple helper class that creates more or less typed structures over
// JSArray object. This is an adhoc method of passing structures from C++
// to JavaScript.
// CRTP base: subclass S defines kSize_ and field offsets; this class
// provides typed accessors over the backing JSArray.
template<typename S>
class JSArrayBasedStruct {
 public:
  // Allocates a fresh backing JSArray of the subclass' fixed size.
  static S Create() {
    Handle<JSArray> array = Factory::NewJSArray(S::kSize_);
    return S(array);
  }
  // Re-types an untyped object (known to be a JSArray) as the struct.
  static S cast(Object* object) {
    JSArray* array = JSArray::cast(object);
    Handle<JSArray> array_handle(array);
    return S(array_handle);
  }
  explicit JSArrayBasedStruct(Handle<JSArray> array) : array_(array) {
  }
  Handle<JSArray> GetJSArray() {
    return array_;
  }

 protected:
  void SetField(int field_position, Handle<Object> value) {
    SetElement(array_, field_position, value);
  }
  void SetSmiValueField(int field_position, int value) {
    SetElement(array_, field_position, Handle<Smi>(Smi::FromInt(value)));
  }
  Object* GetField(int field_position) {
    return array_->GetElement(field_position);
  }
  int GetSmiValueField(int field_position) {
    Object* res = GetField(field_position);
    return Smi::cast(res)->value();
  }

 private:
  Handle<JSArray> array_;  // Backing storage for all fields.
};
// Represents some function compilation details. This structure will be used
// from JavaScript. It contains Code object, which is kept wrapped
// into a BlindReference for sanitizing reasons.
class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
 public:
  explicit FunctionInfoWrapper(Handle<JSArray> array)
      : JSArrayBasedStruct<FunctionInfoWrapper>(array) {
  }
  // Records the statically known facts about a function literal;
  // parent_index is the record index of the enclosing function (-1 for
  // the top-level function).
  void SetInitialProperties(Handle<String> name, int start_position,
                            int end_position, int param_num, int parent_index) {
    HandleScope scope;
    this->SetField(kFunctionNameOffset_, name);
    this->SetSmiValueField(kStartPositionOffset_, start_position);
    this->SetSmiValueField(kEndPositionOffset_, end_position);
    this->SetSmiValueField(kParamNumOffset_, param_num);
    this->SetSmiValueField(kParentIndexOffset_, parent_index);
  }
  // Stores the Code object, hidden from JavaScript inside a JSValue wrapper.
  void SetFunctionCode(Handle<Code> function_code) {
    Handle<JSValue> wrapper = WrapInJSValue(*function_code);
    this->SetField(kCodeOffset_, wrapper);
  }
  void SetScopeInfo(Handle<JSArray> scope_info_array) {
    this->SetField(kScopeInfoOffset_, scope_info_array);
  }
  int GetParentIndex() {
    return this->GetSmiValueField(kParentIndexOffset_);
  }
  // Unwraps and returns the previously stored Code object.
  Handle<Code> GetFunctionCode() {
    Handle<Object> raw_result = UnwrapJSValue(Handle<JSValue>(
        JSValue::cast(this->GetField(kCodeOffset_))));
    return Handle<Code>::cast(raw_result);
  }
  int GetStartPosition() {
    return this->GetSmiValueField(kStartPositionOffset_);
  }
  int GetEndPosition() {
    return this->GetSmiValueField(kEndPositionOffset_);
  }

 private:
  // Field layout inside the backing JSArray.
  static const int kFunctionNameOffset_ = 0;
  static const int kStartPositionOffset_ = 1;
  static const int kEndPositionOffset_ = 2;
  static const int kParamNumOffset_ = 3;
  static const int kCodeOffset_ = 4;
  static const int kScopeInfoOffset_ = 5;
  static const int kParentIndexOffset_ = 6;
  static const int kSize_ = 7;
  friend class JSArrayBasedStruct<FunctionInfoWrapper>;
};
// Wraps SharedFunctionInfo along with some of its fields for passing it
// back to JavaScript. SharedFunctionInfo object itself is additionally
// wrapped into BlindReference for sanitizing reasons.
class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
 public:
  explicit SharedInfoWrapper(Handle<JSArray> array)
      : JSArrayBasedStruct<SharedInfoWrapper>(array) {
  }
  // Stores name, source positions and the SharedFunctionInfo itself,
  // the latter hidden from JavaScript behind a JSValue wrapper.
  void SetProperties(Handle<String> name, int start_position, int end_position,
                     Handle<SharedFunctionInfo> info) {
    HandleScope scope;
    this->SetField(kFunctionNameOffset_, name);
    Handle<JSValue> info_holder = WrapInJSValue(*info);
    this->SetField(kSharedInfoOffset_, info_holder);
    this->SetSmiValueField(kStartPositionOffset_, start_position);
    this->SetSmiValueField(kEndPositionOffset_, end_position);
  }
  // Unwraps and returns the stored SharedFunctionInfo.
  Handle<SharedFunctionInfo> GetInfo() {
    Object* element = this->GetField(kSharedInfoOffset_);
    Handle<JSValue> value_wrapper(JSValue::cast(element));
    Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
    return Handle<SharedFunctionInfo>::cast(raw_result);
  }

 private:
  // Field layout inside the backing JSArray.
  static const int kFunctionNameOffset_ = 0;
  static const int kStartPositionOffset_ = 1;
  static const int kEndPositionOffset_ = 2;
  static const int kSharedInfoOffset_ = 3;
  static const int kSize_ = 4;
  friend class JSArrayBasedStruct<SharedInfoWrapper>;
};
class FunctionInfoListener {
public:
FunctionInfoListener() {
current_parent_index_ = -1;
len_ = 0;
result_ = Factory::NewJSArray(10);
}
void FunctionStarted(FunctionLiteral* fun) {
// Implementation follows.
HandleScope scope;
FunctionInfoWrapper info = FunctionInfoWrapper::Create();
info.SetInitialProperties(fun->name(), fun->start_position(),
fun->end_position(), fun->num_parameters(),
current_parent_index_);
current_parent_index_ = len_;
SetElement(result_, len_, info.GetJSArray());
len_++;
}
void FunctionDone() {
// Implementation follows.
HandleScope scope;
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
current_parent_index_ = info.GetParentIndex();
}
void FunctionScope(Scope* scope) {
// Implementation follows.
HandleScope handle_scope;
Handle<JSArray> scope_info_list = Factory::NewJSArray(10);
int scope_info_length = 0;
// Saves some description of scope. It stores name and indexes of
// variables in the whole scope chain. Null-named slots delimit
// scopes of this chain.
Scope* outer_scope = scope->outer_scope();
if (outer_scope == NULL) {
return;
}
do {
ZoneList<Variable*> list(10);
outer_scope->CollectUsedVariables(&list);
int j = 0;
for (int i = 0; i < list.length(); i++) {
Variable* var1 = list[i];
Slot* slot = var1->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
if (j != i) {
list[j] = var1;
}
j++;
}
}
// Sort it.
for (int k = 1; k < j; k++) {
int l = k;
for (int m = k + 1; m < j; m++) {
if (list[l]->slot()->index() > list[m]->slot()->index()) {
l = m;
}
}
list[k] = list[l];
}
for (int i = 0; i < j; i++) {
SetElement(scope_info_list, scope_info_length, list[i]->name());
scope_info_length++;
SetElement(scope_info_list, scope_info_length,
Handle<Smi>(Smi::FromInt(list[i]->slot()->index())));
scope_info_length++;
}
SetElement(scope_info_list, scope_info_length,
Handle<Object>(Heap::null_value()));
scope_info_length++;
outer_scope = outer_scope->outer_scope();
} while (outer_scope != NULL);
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
info.SetScopeInfo(scope_info_list);
}
void FunctionCode(Handle<Code> function_code) {
// Implementation follows.
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
info.SetFunctionCode(function_code);
}
Handle<JSArray> GetResult() {
return result_;
}
private:
Handle<JSArray> result_;
int len_;
int current_parent_index_;
};
static FunctionInfoListener* active_function_info_listener = NULL;
// Compiles source in the context of script with a FunctionInfoListener
// installed and returns the gathered per-function compile information.
JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
                                     Handle<String> source) {
  CompilationZoneScope zone_scope(DELETE_ON_EXIT);

  FunctionInfoListener listener;
  // Temporarily swap in the new source so the compiler sees it ...
  Handle<Object> original_source = Handle<Object>(script->source());
  script->set_source(*source);
  active_function_info_listener = &listener;
  CompileScriptForTracker(script);
  active_function_info_listener = NULL;
  // ... then restore the original text; only the gathered info is kept.
  script->set_source(*original_source);

  return *(listener.GetResult());
}
// Replaces, in place, each SharedFunctionInfo element of array with a
// SharedInfoWrapper JSArray carrying its name, positions and wrapped info.
void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
  HandleScope scope;
  int len = Smi::cast(array->length())->value();
  for (int i = 0; i < len; i++) {
    Handle<SharedFunctionInfo> info(
        SharedFunctionInfo::cast(array->GetElement(i)));
    SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create();
    Handle<String> name_handle(String::cast(info->name()));
    info_wrapper.SetProperties(name_handle, info->start_position(),
                               info->end_position(), info);
    array->SetElement(i, *(info_wrapper.GetJSArray()));
  }
}
// Installs the newly compiled code and its source positions on the existing
// SharedFunctionInfo, so every live closure picks up the new behavior.
void LiveEdit::ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
                                   Handle<JSArray> shared_info_array) {
  HandleScope scope;

  FunctionInfoWrapper compile_info_wrapper(new_compile_info_array);
  SharedInfoWrapper shared_info_wrapper(shared_info_array);

  Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();

  shared_info->set_code(*(compile_info_wrapper.GetFunctionCode()),
                        UPDATE_WRITE_BARRIER);
  shared_info->set_start_position(compile_info_wrapper.GetStartPosition());
  shared_info->set_end_position(compile_info_wrapper.GetEndPosition());
  // update breakpoints, original code, constructor stub
}
// Re-points a function at another Script object (the saved old version of
// the source) so the function can still find its own text.
void LiveEdit::RelinkFunctionToScript(Handle<JSArray> shared_info_array,
                                      Handle<Script> script_handle) {
  SharedInfoWrapper wrapper(shared_info_array);
  Handle<SharedFunctionInfo> function_info = wrapper.GetInfo();
  function_info->set_script(*script_handle);
}
// For a script text change (defined as position_change_array), translates
// position in unchanged text to position in changed text.
// Text change is a set of non-overlapping regions in text, that have changed
// their contents and length. It is specified as array of groups of 3 numbers:
// (change_begin, change_end, change_end_new_position).
// Each group describes a change in text; groups are sorted by change_begin.
// Only position in text beyond any changes may be successfully translated.
// If a positions is inside some region that changed, result is currently
// undefined.
// Translates original_position by summing the length deltas of all change
// chunks that end at or before it (see comment block above for the
// position_change_array layout).
static int TranslatePosition(int original_position,
                             Handle<JSArray> position_change_array) {
  int position_diff = 0;
  int array_len = Smi::cast(position_change_array->length())->value();
  // TODO(635): binary search may be used here
  for (int i = 0; i < array_len; i += 3) {
    int chunk_start =
        Smi::cast(position_change_array->GetElement(i))->value();
    // BUG FIX: stop BEFORE applying this chunk's diff. The previous code
    // updated position_diff first, so positions lying in front of the first
    // change region were wrongly shifted by that region's length delta.
    if (original_position < chunk_start) {
      break;
    }
    int chunk_end =
        Smi::cast(position_change_array->GetElement(i + 1))->value();
    // Position mustn't be inside a chunk.
    ASSERT(original_position >= chunk_end);
    int chunk_changed_end =
        Smi::cast(position_change_array->GetElement(i + 2))->value();
    position_diff = chunk_changed_end - chunk_end;
  }

  return original_position + position_diff;
}
// Shifts a function's start/end positions through the recorded text changes.
void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
                                      Handle<JSArray> position_change_array) {
  SharedInfoWrapper shared_info_wrapper(shared_info_array);
  Handle<SharedFunctionInfo> info = shared_info_wrapper.GetInfo();

  info->set_start_position(TranslatePosition(info->start_position(),
                                             position_change_array));
  info->set_end_position(TranslatePosition(info->end_position(),
                                           position_change_array));
  // Also patch rinfos (both in working code and original code), breakpoints.
}
// Notifies the active listener (if any) that a function literal was entered.
LiveEditFunctionTracker::LiveEditFunctionTracker(FunctionLiteral* fun) {
  if (active_function_info_listener == NULL) return;
  active_function_info_listener->FunctionStarted(fun);
}
// Notifies the active listener (if any) that the function literal was left.
LiveEditFunctionTracker::~LiveEditFunctionTracker() {
  if (active_function_info_listener == NULL) return;
  active_function_info_listener->FunctionDone();
}
// Forwards the generated code of the current function to the listener.
void LiveEditFunctionTracker::RecordFunctionCode(Handle<Code> code) {
  if (active_function_info_listener == NULL) return;
  active_function_info_listener->FunctionCode(code);
}
void LiveEditFunctionTracker::RecordFunctionScope(Scope* scope) {
if (active_function_info_listener != NULL) {
active_function_info_listener->FunctionScope(scope);
}
}
bool LiveEditFunctionTracker::IsActive() {
return active_function_info_listener != NULL;
}
#else // ENABLE_DEBUGGER_SUPPORT
// This ifdef-else-endif section provides working or stub implementation of
// LiveEditFunctionTracker.
LiveEditFunctionTracker::LiveEditFunctionTracker(FunctionLiteral* fun) {
}
LiveEditFunctionTracker::~LiveEditFunctionTracker() {
}
void LiveEditFunctionTracker::RecordFunctionCode(Handle<Code> code) {
}
void LiveEditFunctionTracker::RecordFunctionScope(Scope* scope) {
}
bool LiveEditFunctionTracker::IsActive() {
return false;
}
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal

28
deps/v8/src/liveedit.h

@ -73,6 +73,34 @@ class LiveEditFunctionTracker {
static bool IsActive();
};
#ifdef ENABLE_DEBUGGER_SUPPORT
class LiveEdit : AllStatic {
public:
static JSArray* GatherCompileInfo(Handle<Script> script,
Handle<String> source);
static void WrapSharedFunctionInfos(Handle<JSArray> array);
static void ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
Handle<JSArray> shared_info_array);
static void RelinkFunctionToScript(Handle<JSArray> shared_info_array,
Handle<Script> script_handle);
static void PatchFunctionPositions(Handle<JSArray> shared_info_array,
Handle<JSArray> position_change_array);
// A copy of this is in liveedit-delay.js.
enum FunctionPatchabilityStatus {
FUNCTION_AVAILABLE_FOR_PATCH = 0,
FUNCTION_BLOCKED_ON_STACK = 1
};
};
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
#endif /* V*_LIVEEDIT_H_ */

34
deps/v8/src/log.cc

@ -329,7 +329,7 @@ VMState Logger::bottom_state_(EXTERNAL);
SlidingStateWindow* Logger::sliding_state_window_ = NULL;
const char** Logger::log_events_ = NULL;
CompressionHelper* Logger::compression_helper_ = NULL;
bool Logger::is_logging_ = false;
int Logger::logging_nesting_ = 0;
int Logger::cpu_profiler_nesting_ = 0;
int Logger::heap_profiler_nesting_ = 0;
@ -389,12 +389,19 @@ void Logger::UncheckedStringEvent(const char* name, const char* value) {
void Logger::IntEvent(const char* name, int value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log) return;
if (FLAG_log) UncheckedIntEvent(name, value);
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedIntEvent(const char* name, int value) {
if (!Log::IsEnabled()) return;
LogMessageBuilder msg;
msg.Append("%s,%d\n", name, value);
msg.WriteToLogFile();
#endif
}
#endif
void Logger::HandleEvent(const char* name, Object** location) {
@ -1169,19 +1176,18 @@ void Logger::PauseProfiler(int flags, int tag) {
// Must be the same message as Log::kDynamicBufferSeal.
LOG(UncheckedStringEvent("profiler", "pause"));
}
--logging_nesting_;
}
}
if (flags &
(PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
if (--heap_profiler_nesting_ == 0) {
FLAG_log_gc = false;
--logging_nesting_;
}
}
if (tag != 0) {
IntEvent("close-tag", tag);
}
if (GetActiveProfilerModules() == PROFILER_MODULE_NONE) {
is_logging_ = false;
UncheckedIntEvent("close-tag", tag);
}
}
@ -1189,11 +1195,11 @@ void Logger::PauseProfiler(int flags, int tag) {
void Logger::ResumeProfiler(int flags, int tag) {
if (!Log::IsEnabled()) return;
if (tag != 0) {
IntEvent("open-tag", tag);
UncheckedIntEvent("open-tag", tag);
}
if (flags & PROFILER_MODULE_CPU) {
if (cpu_profiler_nesting_++ == 0) {
is_logging_ = true;
++logging_nesting_;
if (FLAG_prof_lazy) {
profiler_->Engage();
LOG(UncheckedStringEvent("profiler", "resume"));
@ -1209,7 +1215,7 @@ void Logger::ResumeProfiler(int flags, int tag) {
if (flags &
(PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
if (heap_profiler_nesting_++ == 0) {
is_logging_ = true;
++logging_nesting_;
FLAG_log_gc = true;
}
}
@ -1261,6 +1267,8 @@ void Logger::LogCodeObject(Object* object) {
switch (code_object->kind()) {
case Code::FUNCTION:
return; // We log this later using LogCompiledFunctions.
case Code::BINARY_OP_IC:
// fall through
case Code::STUB:
description = CodeStub::MajorName(code_object->major_key(), true);
if (description == NULL)
@ -1482,14 +1490,16 @@ bool Logger::Setup() {
compression_helper_ = new CompressionHelper(kCompressionWindowSize);
}
is_logging_ = start_logging;
if (start_logging) {
logging_nesting_ = 1;
}
if (FLAG_prof) {
profiler_ = new Profiler();
if (!FLAG_prof_auto) {
profiler_->pause();
} else {
is_logging_ = true;
logging_nesting_ = 1;
}
if (!FLAG_prof_lazy) {
profiler_->Engage();

7
deps/v8/src/log.h

@ -265,7 +265,7 @@ class Logger {
}
static bool is_logging() {
return is_logging_;
return logging_nesting_ > 0;
}
// Pause/Resume collection of profiling data.
@ -330,6 +330,9 @@ class Logger {
// Logs a StringEvent regardless of whether FLAG_log is true.
static void UncheckedStringEvent(const char* name, const char* value);
// Logs an IntEvent regardless of whether FLAG_log is true.
static void UncheckedIntEvent(const char* name, int value);
// Stops logging and profiling in case of insufficient resources.
static void StopLoggingAndProfiling();
@ -372,7 +375,7 @@ class Logger {
friend class LoggerTestHelper;
static bool is_logging_;
static int logging_nesting_;
static int cpu_profiler_nesting_;
static int heap_profiler_nesting_;
#else

10
deps/v8/src/macros.py

@ -73,6 +73,16 @@ const kDayMask = 0x01f;
const kYearShift = 9;
const kMonthShift = 5;
# Limits for parts of the date, so that we support all the dates that
# ECMA 262 - 15.9.1.1 requires us to, but at the same time be sure that
# the date (days since 1970) is in SMI range.
const kMinYear = -1000000;
const kMaxYear = 1000000;
const kMinMonth = -10000000;
const kMaxMonth = 10000000;
const kMinDate = -100000000;
const kMaxDate = 100000000;
# Type query macros.
#
# Note: We have special support for typeof(foo) === 'bar' in the compiler.

8
deps/v8/src/math.js

@ -84,7 +84,7 @@ function MathCeil(x) {
// ECMA 262 - 15.8.2.7
function MathCos(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_cos(x);
return %_Math_cos(x);
}
// ECMA 262 - 15.8.2.8
@ -159,7 +159,7 @@ function MathMin(arg1, arg2) { // length == 2
function MathPow(x, y) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(y)) y = ToNumber(y);
return %Math_pow(x, y);
return %_Math_pow(x, y);
}
// ECMA 262 - 15.8.2.14
@ -176,13 +176,13 @@ function MathRound(x) {
// ECMA 262 - 15.8.2.16
function MathSin(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_sin(x);
return %_Math_sin(x);
}
// ECMA 262 - 15.8.2.17
function MathSqrt(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_sqrt(x);
return %_Math_sqrt(x);
}
// ECMA 262 - 15.8.2.18

1
deps/v8/src/messages.cc

@ -30,6 +30,7 @@
#include "api.h"
#include "execution.h"
#include "messages.h"
#include "spaces-inl.h"
#include "top.h"

1
deps/v8/src/messages.js

@ -127,6 +127,7 @@ function FormatMessage(message) {
malformed_regexp: "Invalid regular expression: /%0/: %1",
unterminated_regexp: "Invalid regular expression: missing /",
regexp_flags: "Cannot supply flags when constructing one RegExp from another",
incompatible_method_receiver: "Method %0 called on incompatible receiver %1",
invalid_lhs_in_assignment: "Invalid left-hand side in assignment",
invalid_lhs_in_for_in: "Invalid left-hand side in for-in",
invalid_lhs_in_postfix_op: "Invalid left-hand side expression in postfix operation",

45
deps/v8/src/mips/codegen-mips.cc

@ -81,7 +81,7 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm)
// a1: called JS function
// cp: callee's context
void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
void CodeGenerator::Generate(CompilationInfo* infomode) {
UNIMPLEMENTED_MIPS();
}
@ -292,6 +292,16 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
// This should generate code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It is not yet implemented on ARM, so it always goes to the slow case.
@ -300,6 +310,11 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
@ -457,6 +472,34 @@ void CEntryStub::Generate(MacroAssembler* masm) {
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
UNIMPLEMENTED_MIPS();
// Save callee saved registers on the stack.
__ MultiPush(kCalleeSaved | ra.bit());
// ********** State **********
//
// * Registers:
// a0: entry_address
// a1: function
// a2: reveiver_pointer
// a3: argc
//
// * Stack:
// ---------------------------
// args
// ---------------------------
// 4 args slots
// ---------------------------
// callee saved registers + ra
// ---------------------------
//
// ***************************
__ break_(0x1234);
// Restore callee saved registers from the stack.
__ MultiPop(kCalleeSaved | ra.bit());
// Load a result.
__ li(v0, Operand(0x1234));
__ jr(ra);

14
deps/v8/src/mips/codegen-mips.h

@ -157,11 +157,10 @@ class CodeGenerator: public AstVisitor {
private:
// Construction/Destruction.
explicit CodeGenerator(MacroAssembler* masm);
virtual ~CodeGenerator() { delete masm_; }
// Accessors.
inline bool is_eval();
Scope* scope() const { return scope_; }
inline Scope* scope();
// Generating deferred code.
void ProcessDeferred();
@ -184,7 +183,7 @@ class CodeGenerator: public AstVisitor {
#undef DEF_VISIT
// Main code generation function
void Generate(CompilationInfo* info, Mode mode);
void Generate(CompilationInfo* info);
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
@ -227,6 +226,9 @@ class CodeGenerator: public AstVisitor {
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
void GenerateCharFromCode(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@ -244,6 +246,11 @@ class CodeGenerator: public AstVisitor {
void GenerateRegExpExec(ZoneList<Expression*>* args);
void GenerateNumberToString(ZoneList<Expression*>* args);
// Fast support for Math.pow().
void GenerateMathPow(ZoneList<Expression*>* args);
// Fast support for Math.sqrt().
void GenerateMathPow(ZoneList<Expression*>* args);
// Fast support for Math.sin and Math.cos.
inline void GenerateMathSin(ZoneList<Expression*>* args);
@ -302,6 +309,7 @@ class CodeGenerator: public AstVisitor {
friend class JumpTarget;
friend class Reference;
friend class FastCodeGenerator;
friend class FullCodeGenerator;
friend class FullCodeGenSyntaxChecker;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);

20
deps/v8/src/mips/fast-codegen-mips.cc

@ -35,6 +35,14 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
Register FastCodeGenerator::accumulator0() { return no_reg; }
Register FastCodeGenerator::accumulator1() { return no_reg; }
Register FastCodeGenerator::scratch0() { return no_reg; }
Register FastCodeGenerator::scratch1() { return no_reg; }
Register FastCodeGenerator::receiver_reg() { return no_reg; }
Register FastCodeGenerator::context_reg() { return no_reg; }
void FastCodeGenerator::Generate(CompilationInfo* info) {
UNIMPLEMENTED_MIPS();
}
@ -45,7 +53,17 @@ void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
}
void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> name) {
UNIMPLEMENTED_MIPS();
}
void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
UNIMPLEMENTED_MIPS();
}
void FastCodeGenerator::EmitBitOr() {
UNIMPLEMENTED_MIPS();
}

5
deps/v8/src/mips/full-codegen-mips.cc

@ -146,6 +146,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
UNIMPLEMENTED_MIPS();
}

25
deps/v8/src/mips/ic-mips.cc

@ -90,11 +90,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
void LoadIC::GenerateMiss(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
UNIMPLEMENTED_MIPS();
}
@ -120,11 +115,6 @@ Object* KeyedLoadIC_Miss(Arguments args);
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
}
void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
UNIMPLEMENTED_MIPS();
}
@ -145,24 +135,23 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
}
void KeyedStoreIC::Generate(MacroAssembler* masm,
const ExternalReference& f) {
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
@ -172,12 +161,12 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
}
void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
void StoreIC::GenerateMiss(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
void StoreIC::GenerateMiss(MacroAssembler* masm) {
void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}

1
deps/v8/src/mips/jump-target-mips.cc

@ -31,6 +31,7 @@
#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"
#include "virtual-frame-inl.h"
namespace v8 {
namespace internal {

31
deps/v8/src/mips/macro-assembler-mips.cc

@ -422,7 +422,7 @@ void MacroAssembler::MultiPopReversed(RegList regs) {
// Trashes the at register if no scratch register is provided.
void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
const Operand& rt, Register scratch) {
Register r2;
Register r2 = no_reg;
if (rt.is_reg()) {
// We don't want any other register but scratch clobbered.
ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
@ -489,7 +489,7 @@ void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
const Operand& rt, Register scratch) {
Register r2;
Register r2 = no_reg;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@ -559,7 +559,7 @@ void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
// cases, so we keep slt and add an intermediate third instruction.
void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
const Operand& rt, Register scratch) {
Register r2;
Register r2 = no_reg;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@ -634,7 +634,7 @@ void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
const Operand& rt, Register scratch) {
Register r2;
Register r2 = no_reg;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@ -787,6 +787,16 @@ void MacroAssembler::Call(Label* target) {
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
void MacroAssembler::DebugBreak() {
UNIMPLEMENTED_MIPS();
}
#endif
// ---------------------------------------------------------------------------
// Exception handling
@ -826,14 +836,21 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
}
void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
UNIMPLEMENTED_MIPS();
TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
}
void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
UNIMPLEMENTED_MIPS();
}

47
deps/v8/src/mips/macro-assembler-mips.h

@ -115,11 +115,7 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
// Sets the remembered set bit for [address+offset], where address is the
// address of the heap object 'object'. The address must be in the first 8K
// of an allocated page. The 'scratch' register is used in the
// implementation and all 3 registers are clobbered by the operation, as
// well as the ip register.
// Sets the remembered set bit for [address+offset].
void RecordWrite(Register object, Register offset, Register scratch);
@ -182,19 +178,8 @@ class MacroAssembler: public Assembler {
// Push multiple registers on the stack.
// With MultiPush, lower registers are pushed first on the stack.
// For example if you push t0, t1, s0, and ra you get:
// | |
// |-----------------------|
// | t0 | +
// |-----------------------| |
// | t1 | |
// |-----------------------| |
// | s0 | v
// |-----------------------| -
// | ra |
// |-----------------------|
// | |
// Registers are saved in numerical order, with higher numbered registers
// saved in higher memory addresses
void MultiPush(RegList regs);
void MultiPushReversed(RegList regs);
void Push(Register src) {
@ -224,6 +209,20 @@ class MacroAssembler: public Assembler {
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
void SaveRegistersToMemory(RegList regs);
void RestoreRegistersFromMemory(RegList regs);
void CopyRegistersFromMemoryToStack(Register base, RegList regs);
void CopyRegistersFromStackToMemory(Register base,
Register scratch,
RegList regs);
void DebugBreak();
#endif
// ---------------------------------------------------------------------------
// Exception handling
@ -268,21 +267,25 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
// Eventually this should be used for all C calls.
void CallRuntime(Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
// Tail call of a runtime routine (jump).
// Like JumpToRuntime, but also takes care of passing the number
// Like JumpToExternalReference, but also takes care of passing the number
// of parameters.
void TailCallRuntime(const ExternalReference& ext,
void TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size);
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size);
// Jump to the builtin routine.
void JumpToRuntime(const ExternalReference& builtin);
void JumpToExternalReference(const ExternalReference& builtin);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.

31
deps/v8/src/mips/stub-cache-mips.cc

@ -72,20 +72,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
// Generate code to load the length from a string object and return the length.
// If the receiver object is not a string or a wrapped string object the
// execution continues at the miss label. The register containing the
// receiver is potentially clobbered.
void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss) {
UNIMPLEMENTED_MIPS();
__ break_(0x249);
}
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@ -99,7 +85,6 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Builtins::Name storage_extend,
JSObject* object,
int index,
Map* transition,
@ -120,18 +105,6 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
#define __ ACCESS_MASM(masm())
Register StubCompiler::CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
Register holder_reg,
Register scratch,
String* name,
Label* miss) {
UNIMPLEMENTED_MIPS();
return at; // UNIMPLEMENTED RETURN
}
void StubCompiler::GenerateLoadField(JSObject* object,
JSObject* holder,
Register receiver,
@ -192,7 +165,7 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
Object* CallStubCompiler::CompileCallField(Object* object,
Object* CallStubCompiler::CompileCallField(JSObject* object,
JSObject* holder,
int index,
String* name) {
@ -211,7 +184,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
}
Object* CallStubCompiler::CompileCallInterceptor(Object* object,
Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
JSObject* holder,
String* name) {
UNIMPLEMENTED_MIPS();

12
deps/v8/src/mips/virtual-frame-mips.cc

@ -32,6 +32,7 @@
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
#include "virtual-frame-inl.h"
namespace v8 {
namespace internal {
@ -41,17 +42,6 @@ namespace internal {
#define __ ACCESS_MASM(masm())
// On entry to a function, the virtual frame already contains the
// receiver and the parameters. All initial frame elements are in
// memory.
VirtualFrame::VirtualFrame()
: elements_(parameter_count() + local_count() + kPreallocatedElements),
stack_pointer_(parameter_count()) { // 0-based index of TOS.
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::SyncElementBelowStackPointer(int index) {
UNREACHABLE();
}

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save