
Update to V8 2.1.9.1

v0.7.4-release
Ryan Dahl, 15 years ago
parent commit 48f5f77713
  1. deps/v8/ChangeLog (17)
  2. deps/v8/SConstruct (25)
  3. deps/v8/include/v8-debug.h (15)
  4. deps/v8/include/v8.h (76)
  5. deps/v8/src/SConscript (18)
  6. deps/v8/src/api.cc (97)
  7. deps/v8/src/api.h (12)
  8. deps/v8/src/apinatives.js (2)
  9. deps/v8/src/arm/assembler-arm.cc (245)
  10. deps/v8/src/arm/assembler-arm.h (33)
  11. deps/v8/src/arm/codegen-arm.cc (285)
  12. deps/v8/src/arm/codegen-arm.h (44)
  13. deps/v8/src/arm/constants-arm.cc (24)
  14. deps/v8/src/arm/constants-arm.h (20)
  15. deps/v8/src/arm/cpu-arm.cc (2)
  16. deps/v8/src/arm/disasm-arm.cc (242)
  17. deps/v8/src/arm/full-codegen-arm.cc (8)
  18. deps/v8/src/arm/ic-arm.cc (724)
  19. deps/v8/src/arm/jump-target-arm.cc (31)
  20. deps/v8/src/arm/macro-assembler-arm.cc (54)
  21. deps/v8/src/arm/macro-assembler-arm.h (12)
  22. deps/v8/src/arm/simulator-arm.cc (346)
  23. deps/v8/src/arm/simulator-arm.h (5)
  24. deps/v8/src/arm/virtual-frame-arm.cc (21)
  25. deps/v8/src/arm/virtual-frame-arm.h (67)
  26. deps/v8/src/assembler.cc (10)
  27. deps/v8/src/assembler.h (3)
  28. deps/v8/src/ast.cc (661)
  29. deps/v8/src/ast.h (209)
  30. deps/v8/src/bootstrapper.cc (669)
  31. deps/v8/src/bootstrapper.h (32)
  32. deps/v8/src/builtins.cc (44)
  33. deps/v8/src/circular-queue-inl.h (9)
  34. deps/v8/src/circular-queue.cc (86)
  35. deps/v8/src/circular-queue.h (15)
  36. deps/v8/src/codegen.cc (36)
  37. deps/v8/src/codegen.h (31)
  38. deps/v8/src/compilation-cache.cc (136)
  39. deps/v8/src/compilation-cache.h (18)
  40. deps/v8/src/compiler.cc (198)
  41. deps/v8/src/compiler.h (41)
  42. deps/v8/src/contexts.h (2)
  43. deps/v8/src/conversions-inl.h (18)
  44. deps/v8/src/conversions.h (7)
  45. deps/v8/src/cpu-profiler.cc (2)
  46. deps/v8/src/cpu-profiler.h (3)
  47. deps/v8/src/d8.cc (9)
  48. deps/v8/src/data-flow.cc (289)
  49. deps/v8/src/data-flow.h (168)
  50. deps/v8/src/date.js (1090)
  51. deps/v8/src/debug-debugger.js (2132)
  52. deps/v8/src/debug.cc (60)
  53. deps/v8/src/factory.cc (67)
  54. deps/v8/src/factory.h (22)
  55. deps/v8/src/fast-codegen.cc (10)
  56. deps/v8/src/flag-definitions.h (8)
  57. deps/v8/src/frame-element.h (44)
  58. deps/v8/src/frames.cc (2)
  59. deps/v8/src/full-codegen.cc (14)
  60. deps/v8/src/globals.h (16)
  61. deps/v8/src/handles.cc (100)
  62. deps/v8/src/handles.h (14)
  63. deps/v8/src/heap-inl.h (4)
  64. deps/v8/src/heap.cc (73)
  65. deps/v8/src/heap.h (42)
  66. deps/v8/src/ia32/codegen-ia32.cc (859)
  67. deps/v8/src/ia32/codegen-ia32.h (19)
  68. deps/v8/src/ia32/fast-codegen-ia32.cc (10)
  69. deps/v8/src/ia32/full-codegen-ia32.cc (12)
  70. deps/v8/src/ia32/ic-ia32.cc (24)
  71. deps/v8/src/ia32/register-allocator-ia32.cc (4)
  72. deps/v8/src/ia32/virtual-frame-ia32.cc (47)
  73. deps/v8/src/ia32/virtual-frame-ia32.h (22)
  74. deps/v8/src/ic.cc (17)
  75. deps/v8/src/json.js (268)
  76. deps/v8/src/jsregexp.cc (20)
  77. deps/v8/src/jump-target-heavy-inl.h (51)
  78. deps/v8/src/jump-target-heavy.cc (363)
  79. deps/v8/src/jump-target-inl.h (19)
  80. deps/v8/src/jump-target-light-inl.h (42)
  81. deps/v8/src/jump-target-light.cc (99)
  82. deps/v8/src/jump-target.cc (268)
  83. deps/v8/src/jump-target.h (5)
  84. deps/v8/src/liveedit-debugger.js (431)
  85. deps/v8/src/liveedit.h (2)
  86. deps/v8/src/macros.py (7)
  87. deps/v8/src/messages.js (18)
  88. deps/v8/src/mips/builtins-mips.cc (95)
  89. deps/v8/src/mips/codegen-mips-inl.h (26)
  90. deps/v8/src/mips/codegen-mips.cc (953)
  91. deps/v8/src/mips/codegen-mips.h (119)
  92. deps/v8/src/mips/frames-mips.cc (3)
  93. deps/v8/src/mips/frames-mips.h (2)
  94. deps/v8/src/mips/ic-mips.cc (41)
  95. deps/v8/src/mips/jump-target-mips.cc (92)
  96. deps/v8/src/mips/macro-assembler-mips.cc (465)
  97. deps/v8/src/mips/macro-assembler-mips.h (95)
  98. deps/v8/src/mips/stub-cache-mips.cc (47)
  99. deps/v8/src/mips/virtual-frame-mips.cc (122)
  100. deps/v8/src/mips/virtual-frame-mips.h (89)

deps/v8/ChangeLog (17)

@@ -1,3 +1,20 @@
+2010-03-25: Version 2.1.9
+
+        Added API support for reattaching a global object to a context.
+
+        Extended debugger API with access to the internal debugger context.
+
+        Fixed Chromium crashes (issues http://crbug.com/39128 and
+        http://crbug.com/39160)
+
+
+2010-03-24: Version 2.1.8
+
+        Added fine-grained garbage collection callbacks to the API.
+
+        Performance improvements on all platforms.
+
+
 2010-03-22: Version 2.1.7

         Fixed issue 650.

deps/v8/SConstruct (25)

@@ -90,7 +90,7 @@ ANDROID_LINKFLAGS = ['-nostdlib',
                      '-Wl,-z,nocopyreloc',
                      '-Wl,-rpath-link=' + ANDROID_TOP + '/out/target/product/generic/obj/lib',
                      ANDROID_TOP + '/out/target/product/generic/obj/lib/crtbegin_dynamic.o',
-                     ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.2.1/lib/gcc/arm-eabi/4.2.1/interwork/libgcc.a',
+                     ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork/libgcc.a',
                      ANDROID_TOP + '/out/target/product/generic/obj/lib/crtend_android.o'];

 LIBRARY_FLAGS = {
@@ -275,6 +275,7 @@ V8_EXTRA_FLAGS = {
   'gcc': {
     'all': {
       'WARNINGFLAGS': ['-Wall',
+                       '-Werror',
                        '-W',
                        '-Wno-unused-parameter',
                        '-Wnon-virtual-dtor']
@@ -370,7 +371,6 @@ DTOA_EXTRA_FLAGS = {
 CCTEST_EXTRA_FLAGS = {
   'all': {
     'CPPPATH': [join(root_dir, 'src')],
-    'LIBS': ['$LIBRARY']
   },
   'gcc': {
     'all': {
@@ -400,9 +400,10 @@ CCTEST_EXTRA_FLAGS = {
                    '__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
     'CCFLAGS': ANDROID_FLAGS,
     'CPPPATH': ANDROID_INCLUDES,
-    'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
+    'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib',
+                ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'],
     'LINKFLAGS': ANDROID_LINKFLAGS,
-    'LIBS': ['log', 'c', 'stdc++', 'm'],
+    'LIBS': ['log', 'c', 'stdc++', 'm', 'gcc'],
     'mode:release': {
       'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
     }
@@ -430,7 +431,6 @@ CCTEST_EXTRA_FLAGS = {
 SAMPLE_FLAGS = {
   'all': {
     'CPPPATH': [join(abspath('.'), 'include')],
-    'LIBS': ['$LIBRARY'],
   },
   'gcc': {
     'all': {
@@ -464,9 +464,10 @@ SAMPLE_FLAGS = {
                    '__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
     'CCFLAGS': ANDROID_FLAGS,
     'CPPPATH': ANDROID_INCLUDES,
-    'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
+    'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib',
+                ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'],
     'LINKFLAGS': ANDROID_LINKFLAGS,
-    'LIBS': ['log', 'c', 'stdc++', 'm'],
+    'LIBS': ['log', 'c', 'stdc++', 'm', 'gcc'],
     'mode:release': {
       'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
     }
@@ -589,9 +590,10 @@ D8_FLAGS = {
     'LIBS': ['pthread'],
   },
   'os:android': {
-    'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
+    'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib',
+                ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'],
     'LINKFLAGS': ANDROID_LINKFLAGS,
-    'LIBS': ['log', 'c', 'stdc++', 'm'],
+    'LIBS': ['log', 'c', 'stdc++', 'm', 'gcc'],
   },
   'os:win32': {
     'LIBS': ['winmm', 'ws2_32'],
@@ -987,7 +989,6 @@ def BuildSpecific(env, mode, env_overrides):
   if context.options['soname'] == 'on':
     # When building shared object with SONAME version the library name.
     library_name += '-' + version
-  env['LIBRARY'] = library_name

   # Generate library SONAME if required by the build.
   if context.options['soname'] == 'on':
@@ -1008,6 +1009,7 @@ def BuildSpecific(env, mode, env_overrides):

   # Link the object files into a library.
   env.Replace(**context.flags['v8'])
+  env.Prepend(LIBS=[library_name])
   context.ApplyEnvOverrides(env)
   if context.options['library'] == 'static':
@@ -1027,8 +1029,9 @@ def BuildSpecific(env, mode, env_overrides):
     context.d8_targets.append(shell)

   for sample in context.samples:
-    sample_env = Environment(LIBRARY=library_name)
+    sample_env = Environment()
     sample_env.Replace(**context.flags['sample'])
+    sample_env.Prepend(LIBS=[library_name])
     context.ApplyEnvOverrides(sample_env)
     sample_object = sample_env.SConscript(
         join('samples', 'SConscript'),

deps/v8/include/v8-debug.h (15)

@@ -237,9 +237,10 @@ class EXPORT Debug {
    * With this call the debugger is entered and the function specified is called
    * with the execution state as the first argument. This makes it possible to
    * get access to information otherwise not available during normal JavaScript
-   * execution e.g. details on stack frames. The following example show a
-   * JavaScript function which when passed to v8::Debug::Call will return the
-   * current line of JavaScript execution.
+   * execution e.g. details on stack frames. Receiver of the function call will
+   * be the debugger context global object, however this is a subject to change.
+   * The following example show a JavaScript function which when passed to
+   * v8::Debug::Call will return the current line of JavaScript execution.
    *
    * \code
    *   function frame_source_line(exec_state) {
@@ -302,6 +303,14 @@ class EXPORT Debug {
    * of this method.
    */
   static void ProcessDebugMessages();
+
+  /**
+   * Debugger is running in it's own context which is entered while debugger
+   * messages are being dispatched. This is an explicit getter for this
+   * debugger context. Note that the content of the debugger context is subject
+   * to change.
+   */
+  static Local<Context> GetDebugContext();
 };
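
Note: a minimal embedder-side sketch of the new getter; the setup around it is assumed, not part of this commit, and it requires a build with ENABLE_DEBUGGER_SUPPORT.

    #include <v8.h>
    #include <v8-debug.h>

    // Sketch only, against the v8 2.1.x C++ API this commit targets.
    void InspectDebuggerContext() {
      v8::HandleScope scope;
      // New in this commit: an explicit getter for the debugger context.
      v8::Local<v8::Context> debug_context = v8::Debug::GetDebugContext();
      v8::Context::Scope context_scope(debug_context);
      // Script evaluated here runs against the debugger's own globals,
      // whose contents are documented above as subject to change.
    }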

deps/v8/include/v8.h (76)

@@ -2158,12 +2158,26 @@ typedef void (*FailedAccessCheckCallback)(Local<Object> target,
 // --- G a r b a g e   C o l l e c t i o n   C a l l b a c k s

 /**
- * Applications can register a callback function which is called
- * before and after a major garbage collection.  Allocations are not
- * allowed in the callback function, you therefore cannot manipulate
+ * Applications can register callback functions which will be called
+ * before and after a garbage collection.  Allocations are not
+ * allowed in the callback functions, you therefore cannot manipulate
  * objects (set or delete properties for example) since it is possible
  * such operations will result in the allocation of objects.
  */
+enum GCType {
+  kGCTypeScavenge = 1 << 0,
+  kGCTypeMarkSweepCompact = 1 << 1,
+  kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact
+};
+
+enum GCCallbackFlags {
+  kNoGCCallbackFlags = 0,
+  kGCCallbackFlagCompacted = 1 << 0
+};
+
+typedef void (*GCPrologueCallback)(GCType type, GCCallbackFlags flags);
+typedef void (*GCEpilogueCallback)(GCType type, GCCallbackFlags flags);
+
 typedef void (*GCCallback)();
@@ -2299,7 +2313,27 @@ class V8EXPORT V8 {
   /**
    * Enables the host application to receive a notification before a
-   * major garbage colletion.  Allocations are not allowed in the
+   * garbage collection.  Allocations are not allowed in the
+   * callback function, you therefore cannot manipulate objects (set
+   * or delete properties for example) since it is possible such
+   * operations will result in the allocation of objects. It is possible
+   * to specify the GCType filter for your callback. But it is not possible to
+   * register the same callback function two times with different
+   * GCType filters.
+   */
+  static void AddGCPrologueCallback(
+      GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
+
+  /**
+   * This function removes callback which was installed by
+   * AddGCPrologueCallback function.
+   */
+  static void RemoveGCPrologueCallback(GCPrologueCallback callback);
+
+  /**
+   * The function is deprecated. Please use AddGCPrologueCallback instead.
+   * Enables the host application to receive a notification before a
+   * garbage collection. Allocations are not allowed in the
    * callback function, you therefore cannot manipulate objects (set
    * or delete properties for example) since it is possible such
    * operations will result in the allocation of objects.
@@ -2307,6 +2341,26 @@ class V8EXPORT V8 {
   static void SetGlobalGCPrologueCallback(GCCallback);

   /**
+   * Enables the host application to receive a notification after a
+   * garbage collection. Allocations are not allowed in the
+   * callback function, you therefore cannot manipulate objects (set
+   * or delete properties for example) since it is possible such
+   * operations will result in the allocation of objects. It is possible
+   * to specify the GCType filter for your callback. But it is not possible to
+   * register the same callback function two times with different
+   * GCType filters.
+   */
+  static void AddGCEpilogueCallback(
+      GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll);
+
+  /**
+   * This function removes callback which was installed by
+   * AddGCEpilogueCallback function.
+   */
+  static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
+
+  /**
+   * The function is deprecated. Please use AddGCEpilogueCallback instead.
    * Enables the host application to receive a notification after a
    * major garbage collection.  Allocations are not allowed in the
    * callback function, you therefore cannot manipulate objects (set
@@ -2681,9 +2735,21 @@ class V8EXPORT Context {
    */
   void DetachGlobal();

+  /**
+   * Reattaches a global object to a context. This can be used to
+   * restore the connection between a global object and a context
+   * after DetachGlobal has been called.
+   *
+   * \param global_object The global object to reattach to the
+   *   context. For this to work, the global object must be the global
+   *   object that was associated with this context before a call to
+   *   DetachGlobal.
+   */
+  void ReattachGlobal(Handle<Object> global_object);
+
   /** Creates a new context. */
   static Persistent<Context> New(
-      ExtensionConfiguration* extensions = 0,
+      ExtensionConfiguration* extensions = NULL,
       Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
       Handle<Value> global_object = Handle<Value>());
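
Note: a minimal sketch of the new fine-grained callbacks from the embedder's side. The signatures and filter constants come straight from the diff; the counter and the choice of filter are illustrative.

    #include <v8.h>

    static int scavenge_count = 0;

    // No allocation is allowed inside GC callbacks, so only touch plain
    // C++ state here.
    static void OnScavengeStart(v8::GCType type, v8::GCCallbackFlags flags) {
      ++scavenge_count;
    }

    void RegisterGCHooks() {
      // Invoked for scavenges only, thanks to the kGCTypeScavenge filter.
      v8::V8::AddGCPrologueCallback(OnScavengeStart, v8::kGCTypeScavenge);
    }

    void UnregisterGCHooks() {
      v8::V8::RemoveGCPrologueCallback(OnScavengeStart);
    }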

deps/v8/src/SConscript (18)

@@ -113,6 +113,8 @@ SOURCES = {
     """),
   'arch:arm': Split("""
     fast-codegen.cc
+    jump-target-light.cc
+    virtual-frame-light.cc
     arm/builtins-arm.cc
     arm/codegen-arm.cc
     arm/constants-arm.cc
@@ -156,6 +158,8 @@ SOURCES = {
     mips/virtual-frame-mips.cc
     """),
   'arch:ia32': Split("""
+    jump-target-heavy.cc
+    virtual-frame-heavy.cc
     ia32/assembler-ia32.cc
     ia32/builtins-ia32.cc
     ia32/codegen-ia32.cc
@@ -175,6 +179,8 @@ SOURCES = {
     """),
   'arch:x64': Split("""
     fast-codegen.cc
+    jump-target-heavy.cc
+    virtual-frame-heavy.cc
     x64/assembler-x64.cc
     x64/builtins-x64.cc
     x64/codegen-x64.cc
@@ -252,12 +258,12 @@ uri.js
 math.js
 messages.js
 apinatives.js
-debug-delay.js
-liveedit-delay.js
-mirror-delay.js
-date-delay.js
-regexp-delay.js
-json-delay.js
+date.js
+regexp.js
+json.js
+liveedit-debugger.js
+mirror-debugger.js
+debug-debugger.js
 '''.split()

deps/v8/src/api.cc (97)

@@ -537,10 +537,17 @@ i::Object** v8::HandleScope::RawClose(i::Object** value) {
   LOG_API("CloseHandleScope");

   // Read the result before popping the handle block.
-  i::Object* result = *value;
+  i::Object* result = NULL;
+  if (value != NULL) {
+    result = *value;
+  }
   is_closed_ = true;
   i::HandleScope::Leave(&previous_);

+  if (value == NULL) {
+    return NULL;
+  }
+
   // Allocate a new handle on the previous handle block.
   i::Handle<i::Object> handle(result);
   return handle.location();
@@ -1136,7 +1143,7 @@ Local<Script> Script::New(v8::Handle<String> source,
   if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
     pre_data_impl = NULL;
   }
-  i::Handle<i::JSFunction> boilerplate =
+  i::Handle<i::SharedFunctionInfo> result =
       i::Compiler::Compile(str,
                            name_obj,
                            line_offset,
@@ -1145,9 +1152,9 @@
                            pre_data_impl,
                            Utils::OpenHandle(*script_data),
                            i::NOT_NATIVES_CODE);
-  has_pending_exception = boilerplate.is_null();
+  has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(Local<Script>());
-  return Local<Script>(ToApi<Script>(boilerplate));
+  return Local<Script>(ToApi<Script>(result));
 }
@@ -1168,10 +1175,12 @@ Local<Script> Script::Compile(v8::Handle<String> source,
   Local<Script> generic = New(source, origin, pre_data, script_data);
   if (generic.IsEmpty())
     return generic;
-  i::Handle<i::JSFunction> boilerplate = Utils::OpenHandle(*generic);
+  i::Handle<i::Object> obj = Utils::OpenHandle(*generic);
+  i::Handle<i::SharedFunctionInfo> function =
+      i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
   i::Handle<i::JSFunction> result =
-      i::Factory::NewFunctionFromBoilerplate(boilerplate,
-                                             i::Top::global_context());
+      i::Factory::NewFunctionFromSharedFunctionInfo(function,
+                                                    i::Top::global_context());
   return Local<Script>(ToApi<Script>(result));
 }
@@ -1191,10 +1200,15 @@ Local<Value> Script::Run() {
   i::Object* raw_result = NULL;
   {
     HandleScope scope;
-    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
-    if (fun->IsBoilerplate()) {
-      fun = i::Factory::NewFunctionFromBoilerplate(fun,
-                                                   i::Top::global_context());
+    i::Handle<i::Object> obj = Utils::OpenHandle(this);
+    i::Handle<i::JSFunction> fun;
+    if (obj->IsSharedFunctionInfo()) {
+      i::Handle<i::SharedFunctionInfo>
+          function_info(i::SharedFunctionInfo::cast(*obj));
+      fun = i::Factory::NewFunctionFromSharedFunctionInfo(
+          function_info, i::Top::global_context());
+    } else {
+      fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj));
     }
     EXCEPTION_PREAMBLE();
     i::Handle<i::Object> receiver(i::Top::context()->global_proxy());
@@ -1208,14 +1222,28 @@ Local<Value> Script::Run() {
 }


+static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) {
+  i::Handle<i::Object> obj = Utils::OpenHandle(script);
+  i::Handle<i::SharedFunctionInfo> result;
+  if (obj->IsSharedFunctionInfo()) {
+    result =
+        i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj));
+  } else {
+    result =
+        i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared());
+  }
+  return result;
+}
+
+
 Local<Value> Script::Id() {
   ON_BAILOUT("v8::Script::Id()", return Local<Value>());
   LOG_API("Script::Id");
   i::Object* raw_id = NULL;
   {
     HandleScope scope;
-    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
-    i::Handle<i::Script> script(i::Script::cast(fun->shared()->script()));
+    i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
+    i::Handle<i::Script> script(i::Script::cast(function_info->script()));
     i::Handle<i::Object> id(script->id());
     raw_id = *id;
   }
@@ -1229,9 +1257,9 @@ void Script::SetData(v8::Handle<String> data) {
   LOG_API("Script::SetData");
   {
     HandleScope scope;
-    i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
+    i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
     i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
-    i::Handle<i::Script> script(i::Script::cast(fun->shared()->script()));
+    i::Handle<i::Script> script(i::Script::cast(function_info->script()));
     script->set_data(*raw_data);
   }
 }
@@ -3057,6 +3085,16 @@ void Context::DetachGlobal() {
 }


+void Context::ReattachGlobal(Handle<Object> global_object) {
+  if (IsDeadCheck("v8::Context::ReattachGlobal()")) return;
+  ENTER_V8;
+  i::Object** ctx = reinterpret_cast<i::Object**>(this);
+  i::Handle<i::Context> context =
+      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+  i::Bootstrapper::ReattachGlobal(context, Utils::OpenHandle(*global_object));
+}
+
+
 Local<v8::Object> ObjectTemplate::NewInstance() {
   ON_BAILOUT("v8::ObjectTemplate::NewInstance()", return Local<v8::Object>());
   LOG_API("ObjectTemplate::NewInstance");
@@ -3525,6 +3563,30 @@ void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
 }


+void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
+  if (IsDeadCheck("v8::V8::AddGCPrologueCallback()")) return;
+  i::Heap::AddGCPrologueCallback(callback, gc_type);
+}
+
+
+void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+  if (IsDeadCheck("v8::V8::RemoveGCPrologueCallback()")) return;
+  i::Heap::RemoveGCPrologueCallback(callback);
+}
+
+
+void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
+  if (IsDeadCheck("v8::V8::AddGCEpilogueCallback()")) return;
+  i::Heap::AddGCEpilogueCallback(callback, gc_type);
+}
+
+
+void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+  if (IsDeadCheck("v8::V8::RemoveGCEpilogueCallback()")) return;
+  i::Heap::RemoveGCEpilogueCallback(callback);
+}
+
+
 void V8::PauseProfiler() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   PauseProfilerEx(PROFILER_MODULE_CPU);
@@ -3928,6 +3990,11 @@ void Debug::ProcessDebugMessages() {
   i::Execution::ProcessDebugMesssages(true);
 }

+Local<Context> Debug::GetDebugContext() {
+  i::EnterDebugger debugger;
+  return Utils::ToLocal(i::Debug::debug_context());
+}
+
 #endif  // ENABLE_DEBUGGER_SUPPORT

 namespace internal {
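
Note: the embedder-visible contract of Script is unchanged by the switch from JSFunction "boilerplates" to SharedFunctionInfo; only the internal representation behind a Script handle moved. A sketch of the two entry points whose internals were rewired (error handling trimmed, names illustrative):

    #include <v8.h>

    v8::Local<v8::Value> CompileAndRun(const char* source_text) {
      v8::HandleScope scope;
      v8::Local<v8::String> source = v8::String::New(source_text);
      // Now backed internally by a SharedFunctionInfo; Run() materializes
      // a JSFunction for the current context on demand.
      v8::Local<v8::Script> script = v8::Script::Compile(source);
      if (script.IsEmpty()) return v8::Local<v8::Value>();
      return scope.Close(script->Run());
    }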

deps/v8/src/api.h (12)

@@ -221,7 +221,7 @@ class Utils {
       OpenHandle(const v8::Array* data);
   static inline v8::internal::Handle<v8::internal::String>
       OpenHandle(const String* data);
-  static inline v8::internal::Handle<v8::internal::JSFunction>
+  static inline v8::internal::Handle<v8::internal::Object>
       OpenHandle(const Script* data);
   static inline v8::internal::Handle<v8::internal::JSFunction>
       OpenHandle(const Function* data);
@@ -247,7 +247,11 @@ static inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
 template <class T>
 v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
     v8::HandleScope* scope) {
-  return Utils::OpenHandle(*scope->Close(Utils::ToLocal(*this)));
+  v8::internal::Handle<T> handle;
+  if (!is_null()) {
+    handle = *this;
+  }
+  return Utils::OpenHandle(*scope->Close(Utils::ToLocal(handle)));
 }
@@ -255,7 +259,7 @@ v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
 #define MAKE_TO_LOCAL(Name, From, To) \
   Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
-    ASSERT(!obj->IsTheHole()); \
+    ASSERT(obj.is_null() || !obj->IsTheHole()); \
     return Local<To>(reinterpret_cast<To*>(obj.location())); \
   }
@@ -296,7 +300,7 @@ MAKE_OPEN_HANDLE(Data, Object)
 MAKE_OPEN_HANDLE(Object, JSObject)
 MAKE_OPEN_HANDLE(Array, JSArray)
 MAKE_OPEN_HANDLE(String, String)
-MAKE_OPEN_HANDLE(Script, JSFunction)
+MAKE_OPEN_HANDLE(Script, Object)
 MAKE_OPEN_HANDLE(Function, JSFunction)
 MAKE_OPEN_HANDLE(Message, JSObject)
 MAKE_OPEN_HANDLE(Context, Context)

deps/v8/src/apinatives.js (2)

@@ -31,7 +31,7 @@

 function CreateDate(time) {
-  var date = new ORIGINAL_DATE();
+  var date = new $Date();
   date.setTime(time);
   return date;
 }

deps/v8/src/arm/assembler-arm.cc (245)

@@ -47,21 +47,41 @@ unsigned CpuFeatures::supported_ = 0;
 unsigned CpuFeatures::enabled_ = 0;
 unsigned CpuFeatures::found_by_runtime_probing_ = 0;

+#ifdef __arm__
+static uint64_t CpuFeaturesImpliedByCompiler() {
+  uint64_t answer = 0;
+#ifdef CAN_USE_ARMV7_INSTRUCTIONS
+  answer |= 1u << ARMv7;
+#endif  // def CAN_USE_ARMV7_INSTRUCTIONS
+  // If the compiler is allowed to use VFP then we can use VFP too in our code
+  // generation even when generating snapshots.  This won't work for cross
+  // compilation.
+#if defined(__VFP_FP__) && !defined(__SOFTFP__)
+  answer |= 1u << VFP3;
+#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
+#ifdef CAN_USE_VFP_INSTRUCTIONS
+  answer |= 1u << VFP3;
+#endif  // def CAN_USE_VFP_INSTRUCTIONS
+  return answer;
+}
+#endif  // def __arm__
+
+
 void CpuFeatures::Probe() {
-  // If the compiler is allowed to use vfp then we can use vfp too in our
-  // code generation.
-#if !defined(__arm__)
+#ifndef __arm__
   // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
   if (FLAG_enable_vfp3) {
     supported_ |= 1u << VFP3;
   }
   // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
   if (FLAG_enable_armv7) {
     supported_ |= 1u << ARMv7;
   }
-#else
+#else  // def __arm__
   if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
+    supported_ |= CpuFeaturesImpliedByCompiler();
     return;  // No features if we might serialize.
   }
@@ -532,7 +552,7 @@ static bool MustUseIp(RelocInfo::Mode rmode) {
     if (!Serializer::enabled()) {
       Serializer::TooLateToEnableNow();
     }
-#endif
+#endif  // def DEBUG
     return Serializer::enabled();
   } else if (rmode == RelocInfo::NONE) {
     return false;
@@ -1137,14 +1157,16 @@ void Assembler::swpb(Register dst,
 // Exception-generating instructions and debugging support.
 void Assembler::stop(const char* msg) {
-#if !defined(__arm__)
+#ifndef __arm__
   // The simulator handles these special instructions and stops execution.
   emit(15 << 28 | ((intptr_t) msg));
-#else
-  // Just issue a simple break instruction for now. Alternatively we could use
-  // the swi(0x9f0001) instruction on Linux.
+#else  // def __arm__
+#ifdef CAN_USE_ARMV5_INSTRUCTIONS
   bkpt(0);
-#endif
+#else  // ndef CAN_USE_ARMV5_INSTRUCTIONS
+  swi(0x9f0001);
+#endif  // ndef CAN_USE_ARMV5_INSTRUCTIONS
+#endif  // def __arm__
 }
@@ -1319,11 +1341,28 @@ void Assembler::vldr(const DwVfpRegister dst,
   // Vdst(15-12) | 1011(11-8) | offset
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(offset % 4 == 0);
+  ASSERT((offset / 4) < 256);
   emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
        0xB*B8 | ((offset / 4) & 255));
 }


+void Assembler::vldr(const SwVfpRegister dst,
+                     const Register base,
+                     int offset,
+                     const Condition cond) {
+  // Sdst = MEM(Rbase + offset).
+  // Instruction details available in ARM DDI 0406A, A8-628.
+  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
+  // Vdst(15-12) | 1010(11-8) | offset
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(offset % 4 == 0);
+  ASSERT((offset / 4) < 256);
+  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+       0xA*B8 | ((offset / 4) & 255));
+}
+
+
 void Assembler::vstr(const DwVfpRegister src,
                      const Register base,
                      int offset,
@@ -1334,6 +1373,7 @@ void Assembler::vstr(const DwVfpRegister src,
   // Vsrc(15-12) | 1011(11-8) | (offset/4)
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(offset % 4 == 0);
+  ASSERT((offset / 4) < 256);
   emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
        0xB*B8 | ((offset / 4) & 255));
 }
@@ -1397,31 +1437,172 @@ void Assembler::vmov(const Register dst,
 }

-void Assembler::vcvt(const DwVfpRegister dst,
-                     const SwVfpRegister src,
-                     const Condition cond) {
-  // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
-  // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+// Type of data to read from or write to VFP register.
+// Used as specifier in generic vcvt instruction.
+enum VFPType { S32, U32, F32, F64 };
+
+
+static bool IsSignedVFPType(VFPType type) {
+  switch (type) {
+    case S32:
+      return true;
+    case U32:
+      return false;
+    default:
+      UNREACHABLE();
+      return false;
+  }
+}
+
+
+static bool IsIntegerVFPType(VFPType type) {
+  switch (type) {
+    case S32:
+    case U32:
+      return true;
+    case F32:
+    case F64:
+      return false;
+    default:
+      UNREACHABLE();
+      return false;
+  }
+}
+
+
+static bool IsDoubleVFPType(VFPType type) {
+  switch (type) {
+    case F32:
+      return false;
+    case F64:
+      return true;
+    default:
+      UNREACHABLE();
+      return false;
+  }
+}
+
+
+// Depending on split_last_bit split binary representation of reg_code into Vm:M
+// or M:Vm form (where M is single bit).
+static void SplitRegCode(bool split_last_bit,
+                         int reg_code,
+                         int* vm,
+                         int* m) {
+  if (split_last_bit) {
+    *m = reg_code & 0x1;
+    *vm = reg_code >> 1;
+  } else {
+    *m = (reg_code & 0x10) >> 4;
+    *vm = reg_code & 0x0F;
+  }
+}
+
+
+// Encode vcvt.src_type.dst_type instruction.
+static Instr EncodeVCVT(const VFPType dst_type,
+                        const int dst_code,
+                        const VFPType src_type,
+                        const int src_code,
+                        const Condition cond) {
+  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
+    // Conversion between IEEE floating point and 32-bit integer.
+    // Instruction details available in ARM DDI 0406B, A8.6.295.
+    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
+    // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+    ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
+
+    int sz, opc2, D, Vd, M, Vm, op;
+
+    if (IsIntegerVFPType(dst_type)) {
+      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
+      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
+      op = 1;  // round towards zero
+      SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
+      SplitRegCode(true, dst_code, &Vd, &D);
+    } else {
+      ASSERT(IsIntegerVFPType(src_type));
+
+      opc2 = 0x0;
+      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
+      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
+      SplitRegCode(true, src_code, &Vm, &M);
+      SplitRegCode(!IsDoubleVFPType(dst_type), dst_code, &Vd, &D);
+    }
+
+    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
+            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
+  } else {
+    // Conversion between IEEE double and single precision.
+    // Instruction details available in ARM DDI 0406B, A8.6.298.
+    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
+    // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+    int sz, D, Vd, M, Vm;
+
+    ASSERT(IsDoubleVFPType(dst_type) != IsDoubleVFPType(src_type));
+    sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
+    SplitRegCode(IsDoubleVFPType(src_type), dst_code, &Vd, &D);
+    SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
+
+    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
+            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
+  }
+}
+
+
+void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
+                             const SwVfpRegister src,
+                             const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
-       dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
-       (0x1 & src.code())*B5 | (src.code() >> 1));
+  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), cond));
 }


-void Assembler::vcvt(const SwVfpRegister dst,
-                     const DwVfpRegister src,
-                     const Condition cond) {
-  // Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
-  // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
+                             const SwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), cond));
+}
+
+
+void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
+                             const SwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), cond));
+}
+
+
+void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
+                             const DwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), cond));
+}
+
+
+void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
+                             const DwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), cond));
+}
+
+
+void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
+                             const SwVfpRegister src,
+                             const Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), cond));
+}
+
+
+void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
+                             const DwVfpRegister src,
+                             const Condition cond) {
   ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 |
-       0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
-       0x5*B9 | B8 | B7 | B6 | src.code());
+  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), cond));
 }
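
Note: to make the Vm:M register-code split concrete, here is a standalone sketch; the helper logic is copied from SplitRegCode above, while the driver is illustrative.

    #include <cstdio>

    // Single-precision registers carry M in the low bit of the code;
    // double-precision registers carry it in bit 4.
    static void SplitRegCode(bool split_last_bit, int reg_code, int* vm, int* m) {
      if (split_last_bit) {
        *m = reg_code & 0x1;
        *vm = reg_code >> 1;
      } else {
        *m = (reg_code & 0x10) >> 4;
        *vm = reg_code & 0x0F;
      }
    }

    int main() {
      int vm, m;
      SplitRegCode(true, 15, &vm, &m);            // s15, single precision
      std::printf("s15 -> Vm=%d M=%d\n", vm, m);  // prints Vm=7 M=1
      SplitRegCode(false, 7, &vm, &m);            // d7, double precision
      std::printf("d7  -> Vm=%d M=%d\n", vm, m);  // prints Vm=7 M=0
      return 0;
    }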

deps/v8/src/arm/assembler-arm.h (33)

@@ -826,6 +826,12 @@ class Assembler : public Malloced {
             const Register base,
             int offset,  // Offset must be a multiple of 4.
             const Condition cond = al);
+  void vldr(const SwVfpRegister dst,
+            const Register base,
+            int offset,  // Offset must be a multiple of 4.
+            const Condition cond = al);
+
   void vstr(const DwVfpRegister src,
             const Register base,
             int offset,  // Offset must be a multiple of 4.
@@ -844,12 +850,27 @@
   void vmov(const Register dst,
             const SwVfpRegister src,
             const Condition cond = al);
-  void vcvt(const DwVfpRegister dst,
-            const SwVfpRegister src,
-            const Condition cond = al);
-  void vcvt(const SwVfpRegister dst,
-            const DwVfpRegister src,
-            const Condition cond = al);
+  void vcvt_f64_s32(const DwVfpRegister dst,
+                    const SwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_f32_s32(const SwVfpRegister dst,
+                    const SwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_f64_u32(const DwVfpRegister dst,
+                    const SwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_s32_f64(const SwVfpRegister dst,
+                    const DwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_u32_f64(const SwVfpRegister dst,
+                    const DwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_f64_f32(const DwVfpRegister dst,
+                    const SwVfpRegister src,
+                    const Condition cond = al);
+  void vcvt_f32_f64(const SwVfpRegister dst,
+                    const DwVfpRegister src,
+                    const Condition cond = al);

   void vadd(const DwVfpRegister dst,
             const DwVfpRegister src1,
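
Note: call sites now pick the conversion by name instead of relying on overload resolution. A fragment in the style of the codegen-arm.cc hunks below (registers chosen for illustration, V8-internal macro-assembler context assumed):

    // Untag a Smi and convert it to a double in d7.
    __ mov(r7, Operand(r0, ASR, kSmiTagSize));  // untag
    __ vmov(s15, r7);                           // int32 into a VFP register
    __ vcvt_f64_s32(d7, s15);                   // was: __ vcvt(d7, s15)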

285
deps/v8/src/arm/codegen-arm.cc

@ -2305,14 +2305,13 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
} }
void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) { void CodeGenerator::InstantiateFunction(
Handle<SharedFunctionInfo> function_info) {
VirtualFrame::SpilledScope spilled_scope; VirtualFrame::SpilledScope spilled_scope;
ASSERT(boilerplate->IsBoilerplate()); __ mov(r0, Operand(function_info));
__ mov(r0, Operand(boilerplate));
// Use the fast case closure allocation code that allocates in new // Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning. // space for nested functions that don't need literals cloning.
if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) { if (scope()->is_function_scope() && function_info->num_literals() == 0) {
FastNewClosureStub stub; FastNewClosureStub stub;
frame_->EmitPush(r0); frame_->EmitPush(r0);
frame_->CallStub(&stub, 1); frame_->CallStub(&stub, 1);
@ -2334,27 +2333,27 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
VirtualFrame::SpilledScope spilled_scope; VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ FunctionLiteral"); Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it. // Build the function info and instantiate it.
Handle<JSFunction> boilerplate = Handle<SharedFunctionInfo> function_info =
Compiler::BuildBoilerplate(node, script(), this); Compiler::BuildFunctionInfo(node, script(), this);
// Check for stack-overflow exception. // Check for stack-overflow exception.
if (HasStackOverflow()) { if (HasStackOverflow()) {
ASSERT(frame_->height() == original_height); ASSERT(frame_->height() == original_height);
return; return;
} }
InstantiateBoilerplate(boilerplate); InstantiateFunction(function_info);
ASSERT(frame_->height() == original_height + 1); ASSERT(frame_->height() == original_height + 1);
} }
void CodeGenerator::VisitFunctionBoilerplateLiteral( void CodeGenerator::VisitSharedFunctionInfoLiteral(
FunctionBoilerplateLiteral* node) { SharedFunctionInfoLiteral* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
#endif #endif
VirtualFrame::SpilledScope spilled_scope; VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ FunctionBoilerplateLiteral"); Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
InstantiateBoilerplate(node->boilerplate()); InstantiateFunction(node->shared_function_info());
ASSERT(frame_->height() == original_height + 1); ASSERT(frame_->height() == original_height + 1);
} }
@ -4527,11 +4526,11 @@ void Reference::SetValue(InitState init_state) {
void FastNewClosureStub::Generate(MacroAssembler* masm) { void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Clone the boilerplate in new space. Set the context to the // Create a new closure from the given function info in new
// current context in cp. // space. Set the context to the current context in cp.
Label gc; Label gc;
// Pop the boilerplate function from the stack. // Pop the function info from the stack.
__ pop(r3); __ pop(r3);
// Attempt to allocate new JSFunction in new space. // Attempt to allocate new JSFunction in new space.
@ -4549,20 +4548,18 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
// Clone the rest of the boilerplate fields. We don't have to update // Initialize the rest of the function. We don't have to update the
// the write barrier because the allocated object is in new space. // write barrier because the allocated object is in new space.
for (int offset = kPointerSize; __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
offset < JSFunction::kSize; __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
offset += kPointerSize) { __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
if (offset == JSFunction::kContextOffset) { __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
__ str(cp, FieldMemOperand(r0, offset)); __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
} else { __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r1, FieldMemOperand(r3, offset)); __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
__ str(r1, FieldMemOperand(r0, offset)); __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
}
} // Return result. The argument function info has been popped already.
// Return result. The argument boilerplate has been popped already.
__ Ret(); __ Ret();
// Create a new closure through the slower runtime call. // Create a new closure through the slower runtime call.
@ -4685,42 +4682,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
} }
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer for 0
// (31 instead of 32).
static void CountLeadingZeros(
MacroAssembler* masm,
Register source,
Register scratch,
Register zeros) {
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
__ clz(zeros, source); // This instruction is only supported after ARM5.
#else
__ mov(zeros, Operand(0));
__ mov(scratch, source);
// Top 16.
__ tst(scratch, Operand(0xffff0000));
__ add(zeros, zeros, Operand(16), LeaveCC, eq);
__ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
// Top 8.
__ tst(scratch, Operand(0xff000000));
__ add(zeros, zeros, Operand(8), LeaveCC, eq);
__ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
// Top 4.
__ tst(scratch, Operand(0xf0000000));
__ add(zeros, zeros, Operand(4), LeaveCC, eq);
__ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
// Top 2.
__ tst(scratch, Operand(0xc0000000));
__ add(zeros, zeros, Operand(2), LeaveCC, eq);
__ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
// Top bit.
__ tst(scratch, Operand(0x80000000u));
__ add(zeros, zeros, Operand(1), LeaveCC, eq);
#endif
}
// Takes a Smi and converts to an IEEE 64 bit floating point value in two // Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
@ -4784,25 +4745,27 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
__ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
// Subtract from 0 if source was negative. // Subtract from 0 if source was negative.
__ rsb(source_, source_, Operand(0), LeaveCC, ne); __ rsb(source_, source_, Operand(0), LeaveCC, ne);
// We have -1, 0 or 1, which we treat specially. Register source_ contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
// greater than 1 (not a special case) or less than 1 (special case of 0).
__ cmp(source_, Operand(1)); __ cmp(source_, Operand(1));
__ b(gt, &not_special); __ b(gt, &not_special);
// We have -1, 0 or 1, which we treat specially.
__ cmp(source_, Operand(0));
// For 1 or -1 we need to or in the 0 exponent (biased to 1023). // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
static const uint32_t exponent_word_for_1 = static const uint32_t exponent_word_for_1 =
HeapNumber::kExponentBias << HeapNumber::kExponentShift; HeapNumber::kExponentBias << HeapNumber::kExponentShift;
__ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne); __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
// 1, 0 and -1 all have 0 for the second word. // 1, 0 and -1 all have 0 for the second word.
__ mov(mantissa, Operand(0)); __ mov(mantissa, Operand(0));
__ Ret(); __ Ret();
__ bind(&not_special); __ bind(&not_special);
// Count leading zeros. Uses result2 for a scratch register on pre-ARM5. // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
// Gets the wrong answer for 0, but we already checked for that case above. // Gets the wrong answer for 0, but we already checked for that case above.
CountLeadingZeros(masm, source_, mantissa, zeros_); __ CountLeadingZeros(source_, mantissa, zeros_);
// Compute exponent and or it into the exponent register. // Compute exponent and or it into the exponent register.
// We use result2 as a scratch register here. // We use mantissa as a scratch register here.
__ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias)); __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
__ orr(exponent, __ orr(exponent,
exponent, exponent,
@ -4821,45 +4784,6 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
} }
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public CodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
Register scratch)
: the_int_(the_int),
the_heap_number_(the_heap_number),
scratch_(scratch) { }
private:
Register the_int_;
Register the_heap_number_;
Register scratch_;
// Minor key encoding in 16 bits.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 14> {};
Major MajorKey() { return WriteInt32ToHeapNumber; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return the_int_.code() +
(the_heap_number_.code() << 4) +
(scratch_.code() << 8);
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
#ifdef DEBUG
void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
#endif
};
// See comment for class. // See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int; Label max_negative_int;
@ -5042,7 +4966,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
CpuFeatures::Scope scope(VFP3); CpuFeatures::Scope scope(VFP3);
__ mov(r7, Operand(r1, ASR, kSmiTagSize)); __ mov(r7, Operand(r1, ASR, kSmiTagSize));
__ vmov(s15, r7); __ vmov(s15, r7);
__ vcvt(d7, s15); __ vcvt_f64_s32(d7, s15);
// Load the double from rhs, tagged HeapNumber r0, to d6. // Load the double from rhs, tagged HeapNumber r0, to d6.
__ sub(r7, r0, Operand(kHeapObjectTag)); __ sub(r7, r0, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset); __ vldr(d6, r7, HeapNumber::kValueOffset);
@ -5085,7 +5009,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ vldr(d7, r7, HeapNumber::kValueOffset); __ vldr(d7, r7, HeapNumber::kValueOffset);
__ mov(r7, Operand(r0, ASR, kSmiTagSize)); __ mov(r7, Operand(r0, ASR, kSmiTagSize));
__ vmov(s13, r7); __ vmov(s13, r7);
__ vcvt(d6, s13); __ vcvt_f64_s32(d6, s13);
} else { } else {
__ push(lr); __ push(lr);
// Load lhs to a double in r2, r3. // Load lhs to a double in r2, r3.
@ -5494,29 +5418,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
} }
// Allocates a heap number or jumps to the label if the young space is full and
// a scavenge is needed.
static void AllocateHeapNumber(
MacroAssembler* masm,
Label* need_gc, // Jump here if young space is full.
Register result, // The tagged address of the new heap number.
Register scratch1, // A scratch register.
Register scratch2) { // Another scratch register.
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
__ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
result,
scratch1,
scratch2,
need_gc,
TAG_OBJECT);
// Get heap number map and store it in the allocated object.
__ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
__ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
}
// We fall into this code if the operands were Smis, but the result was // We fall into this code if the operands were Smis, but the result was
// not (eg. overflow). We branch into this code (to the not_smi label) if // not (eg. overflow). We branch into this code (to the not_smi label) if
// the operands were not both Smi. The operands are in r0 and r1. In order // the operands were not both Smi. The operands are in r0 and r1. In order
@ -5533,7 +5434,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// Smi-smi case (overflow). // Smi-smi case (overflow).
// Since both are Smis there is no heap number to overwrite, so allocate. // Since both are Smis there is no heap number to overwrite, so allocate.
// The new heap number is in r5. r6 and r7 are scratch. // The new heap number is in r5. r6 and r7 are scratch.
AllocateHeapNumber(masm, &slow, r5, r6, r7); __ AllocateHeapNumber(r5, r6, r7, &slow);
// If we have floating point hardware, inline ADD, SUB, MUL, and DIV, // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
// using registers d7 and d6 for the double values. // using registers d7 and d6 for the double values.
@ -5543,10 +5444,10 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
CpuFeatures::Scope scope(VFP3); CpuFeatures::Scope scope(VFP3);
__ mov(r7, Operand(r0, ASR, kSmiTagSize)); __ mov(r7, Operand(r0, ASR, kSmiTagSize));
__ vmov(s15, r7); __ vmov(s15, r7);
__ vcvt(d7, s15); __ vcvt_f64_s32(d7, s15);
__ mov(r7, Operand(r1, ASR, kSmiTagSize)); __ mov(r7, Operand(r1, ASR, kSmiTagSize));
__ vmov(s13, r7); __ vmov(s13, r7);
-      __ vcvt(d6, s13);
+      __ vcvt_f64_s32(d6, s13);
     } else {
       // Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
       __ mov(r7, Operand(r0));
@@ -5628,7 +5529,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
     if (mode == NO_OVERWRITE) {
       // In the case where there is no chance of an overwritable float we may as
       // well do the allocation immediately while r0 and r1 are untouched.
-      AllocateHeapNumber(masm, &slow, r5, r6, r7);
+      __ AllocateHeapNumber(r5, r6, r7, &slow);
     }
     // Move r0 to a double in r2-r3.
@@ -5653,7 +5554,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
     __ bind(&r0_is_smi);
     if (mode == OVERWRITE_RIGHT) {
       // We can't overwrite a Smi so get address of new heap number into r5.
-      AllocateHeapNumber(masm, &slow, r5, r6, r7);
+      __ AllocateHeapNumber(r5, r6, r7, &slow);
     }
     if (use_fp_registers) {
@@ -5661,7 +5562,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
       // Convert smi in r0 to double in d7.
       __ mov(r7, Operand(r0, ASR, kSmiTagSize));
       __ vmov(s15, r7);
-      __ vcvt(d7, s15);
+      __ vcvt_f64_s32(d7, s15);
     } else {
       // Write Smi from r0 to r3 and r2 in double format.
       __ mov(r7, Operand(r0));
@@ -5695,7 +5596,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
     __ bind(&r1_is_smi);
     if (mode == OVERWRITE_LEFT) {
       // We can't overwrite a Smi so get address of new heap number into r5.
-      AllocateHeapNumber(masm, &slow, r5, r6, r7);
+      __ AllocateHeapNumber(r5, r6, r7, &slow);
     }
     if (use_fp_registers) {
@@ -5703,7 +5604,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
       // Convert smi in r1 to double in d6.
       __ mov(r7, Operand(r1, ASR, kSmiTagSize));
       __ vmov(s13, r7);
-      __ vcvt(d6, s13);
+      __ vcvt_f64_s32(d6, s13);
     } else {
       // Write Smi from r1 to r1 and r0 in double format.
       __ mov(r7, Operand(r1));
@@ -5830,7 +5731,7 @@ static void GetInt32(MacroAssembler* masm,
     // conversion using round to zero.
     __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
     __ vmov(d7, scratch2, scratch);
-    __ vcvt(s15, d7);
+    __ vcvt_s32_f64(s15, d7);
     __ vmov(dest, s15);
   } else {
     // Get the top bits of the mantissa.
@@ -5942,7 +5843,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
     }
     case NO_OVERWRITE: {
       // Get a new heap number in r5.  r6 and r7 are scratch.
-      AllocateHeapNumber(masm, &slow, r5, r6, r7);
+      __ AllocateHeapNumber(r5, r6, r7, &slow);
     }
     default: break;
   }
@@ -5962,7 +5863,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
   if (mode_ != NO_OVERWRITE) {
     __ bind(&have_to_allocate);
     // Get a new heap number in r5.  r6 and r7 are scratch.
-    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+    __ AllocateHeapNumber(r5, r6, r7, &slow);
     __ jmp(&got_a_heap_number);
   }
@@ -6380,7 +6281,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
     __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
     __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
   } else {
-    AllocateHeapNumber(masm, &slow, r1, r2, r3);
+    __ AllocateHeapNumber(r1, r2, r3, &slow);
     __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
     __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
     __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
@@ -6410,7 +6311,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
     // Allocate a fresh heap number, but don't overwrite r0 until
     // we're sure we can do it without going through the slow case
     // that needs the value in r0.
-    AllocateHeapNumber(masm, &slow, r2, r3, r4);
+    __ AllocateHeapNumber(r2, r3, r4, &slow);
     __ mov(r0, Operand(r2));
   }
@@ -7117,53 +7018,59 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
 }
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
 const char* CompareStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+
+  const char* cc_name;
   switch (cc_) {
-    case lt: return "CompareStub_LT";
-    case gt: return "CompareStub_GT";
-    case le: return "CompareStub_LE";
-    case ge: return "CompareStub_GE";
-    case ne: {
-      if (strict_) {
-        if (never_nan_nan_) {
-          return "CompareStub_NE_STRICT_NO_NAN";
-        } else {
-          return "CompareStub_NE_STRICT";
-        }
-      } else {
-        if (never_nan_nan_) {
-          return "CompareStub_NE_NO_NAN";
-        } else {
-          return "CompareStub_NE";
-        }
-      }
-    }
-    case eq: {
-      if (strict_) {
-        if (never_nan_nan_) {
-          return "CompareStub_EQ_STRICT_NO_NAN";
-        } else {
-          return "CompareStub_EQ_STRICT";
-        }
-      } else {
-        if (never_nan_nan_) {
-          return "CompareStub_EQ_NO_NAN";
-        } else {
-          return "CompareStub_EQ";
-        }
-      }
-    }
-    default: return "CompareStub";
+    case lt: cc_name = "LT"; break;
+    case gt: cc_name = "GT"; break;
+    case le: cc_name = "LE"; break;
+    case ge: cc_name = "GE"; break;
+    case eq: cc_name = "EQ"; break;
+    case ne: cc_name = "NE"; break;
+    default: cc_name = "UnknownCondition"; break;
   }
+
+  const char* strict_name = "";
+  if (strict_ && (cc_ == eq || cc_ == ne)) {
+    strict_name = "_STRICT";
+  }
+
+  const char* never_nan_nan_name = "";
+  if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
+    never_nan_nan_name = "_NO_NAN";
+  }
+
+  const char* include_number_compare_name = "";
+  if (!include_number_compare_) {
+    include_number_compare_name = "_NO_NUMBER";
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "CompareStub_%s%s%s%s",
+               cc_name,
+               strict_name,
+               never_nan_nan_name,
+               include_number_compare_name);
+  return name_;
 }

 int CompareStub::MinorKey() {
-  // Encode the three parameters in a unique 16 bit value.
-  ASSERT((static_cast<unsigned>(cc_) >> 26) < (1 << 16));
-  int nnn_value = (never_nan_nan_ ? 2 : 0);
-  if (cc_ != eq) nnn_value = 0;  // Avoid duplicate stubs.
-  return (static_cast<unsigned>(cc_) >> 26) | nnn_value | (strict_ ? 1 : 0);
+  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
+  // stubs the never NaN NaN condition is only taken into account if the
+  // condition is equals.
+  ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 13));
+  return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
+         | StrictField::encode(strict_)
+         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+         | IncludeNumberCompareField::encode(include_number_compare_);
 }
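
The new MinorKey composes the key from V8's BitField encoders rather than hand-rolled shifts. The ConditionField/StrictField/NeverNanNanField/IncludeNumberCompareField declarations live in the CompareStub class, outside this diff, so the shifts and widths below are illustrative assumptions; the sketch only shows how such an encoding packs and round-trips:

#include <cstdio>

template <class T, int shift, int size>
struct BitField {
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned field) {
    return static_cast<T>((field >> shift) & ((1U << size) - 1));
  }
};

// Assumed layout, for illustration only.
typedef BitField<bool, 0, 1> StrictField;
typedef BitField<bool, 1, 1> NeverNanNanField;
typedef BitField<bool, 2, 1> IncludeNumberCompareField;
typedef BitField<unsigned, 3, 4> ConditionField;

int main() {
  // cc_ >> 28 keeps just the 4-bit ARM condition code (eq encodes as 0).
  unsigned key = ConditionField::encode(0)
               | StrictField::encode(true)
               | NeverNanNanField::encode(false)
               | IncludeNumberCompareField::encode(true);
  printf("minor key = 0x%04x\n", key);                 // 0x0005
  printf("strict = %d\n", StrictField::decode(key));   // 1
  return 0;
}
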

deps/v8/src/arm/codegen-arm.h | 44

@@ -348,8 +348,8 @@ class CodeGenerator: public AstVisitor {
   // name/value pairs.
   void DeclareGlobals(Handle<FixedArray> pairs);

-  // Instantiate the function boilerplate.
-  void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+  // Instantiate the function based on the shared function info.
+  void InstantiateFunction(Handle<SharedFunctionInfo> function_info);

   // Support for type checks.
   void GenerateIsSmi(ZoneList<Expression*>* args);
@@ -660,6 +660,46 @@ class StringCompareStub: public CodeStub {
 };
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public CodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
Register scratch)
: the_int_(the_int),
the_heap_number_(the_heap_number),
scratch_(scratch) { }
private:
Register the_int_;
Register the_heap_number_;
Register scratch_;
// Minor key encoding in 16 bits.
class IntRegisterBits: public BitField<int, 0, 4> {};
class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {};
Major MajorKey() { return WriteInt32ToHeapNumber; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return IntRegisterBits::encode(the_int_.code())
| HeapNumberRegisterBits::encode(the_heap_number_.code())
| ScratchRegisterBits::encode(scratch_.code());
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
#ifdef DEBUG
void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
#endif
};
 class NumberToStringStub: public CodeStub {
  public:
   NumberToStringStub() { }

deps/v8/src/arm/constants-arm.cc | 24

@@ -81,9 +81,27 @@ const char* VFPRegisters::names_[kNumVFPRegisters] = {
 };

-const char* VFPRegisters::Name(int reg) {
+const char* VFPRegisters::Name(int reg, bool is_double) {
   ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
-  return names_[reg];
+  return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
+}
+
+
+int VFPRegisters::Number(const char* name, bool* is_double) {
+  for (int i = 0; i < kNumVFPRegisters; i++) {
+    if (strcmp(names_[i], name) == 0) {
+      if (i < kNumVFPSingleRegisters) {
+        *is_double = false;
+        return i;
+      } else {
+        *is_double = true;
+        return i - kNumVFPSingleRegisters;
+      }
+    }
+  }
+
+  // No register with the requested name found.
+  return kNoRegister;
 }
@@ -104,7 +122,7 @@ int Registers::Number(const char* name) {
     i++;
   }

-  // No register with the reguested name found.
+  // No register with the requested name found.
   return kNoRegister;
 }
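
The single- and double-precision names now share one table, with the 16 doubles stored after the 32 singles, and Number() acting as the inverse of Name(). A standalone sketch of the same layout (the table here is generated rather than V8's literal names_ array; note the parentheses around the ternary, which the index arithmetic requires):

#include <cstdio>
#include <cstring>

static const int kNumVFPSingleRegisters = 32;
static const int kNumVFPDoubleRegisters = 16;
static const int kNumVFPRegisters =
    kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
static char names[kNumVFPRegisters][8];

static void InitNames() {
  for (int i = 0; i < kNumVFPSingleRegisters; i++)
    snprintf(names[i], sizeof(names[i]), "s%d", i);
  for (int i = 0; i < kNumVFPDoubleRegisters; i++)
    snprintf(names[kNumVFPSingleRegisters + i], sizeof(names[0]), "d%d", i);
}

// The double bank starts at offset kNumVFPSingleRegisters in the table.
static const char* Name(int reg, bool is_double) {
  return names[reg + (is_double ? kNumVFPSingleRegisters : 0)];
}

// Inverse lookup, as used by the simulator debugger's "print" command.
static int Number(const char* name, bool* is_double) {
  for (int i = 0; i < kNumVFPRegisters; i++) {
    if (strcmp(names[i], name) == 0) {
      *is_double = (i >= kNumVFPSingleRegisters);
      return *is_double ? i - kNumVFPSingleRegisters : i;
    }
  }
  return -1;  // no register with the requested name
}

int main() {
  InitNames();
  bool is_double = false;
  printf("%s %s %d\n", Name(7, true), Name(7, false),
         Number("d7", &is_double));  // prints: d7 s7 7
}
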

deps/v8/src/arm/constants-arm.h | 20

@@ -84,7 +84,10 @@ namespace arm {
 static const int kNumRegisters = 16;

 // VFP support.
-static const int kNumVFPRegisters = 48;
+static const int kNumVFPSingleRegisters = 32;
+static const int kNumVFPDoubleRegisters = 16;
+static const int kNumVFPRegisters =
+    kNumVFPSingleRegisters + kNumVFPDoubleRegisters;

 // PC is register 15.
 static const int kPCRegister = 15;
@@ -254,6 +257,14 @@ class Instr {
   inline int RtField() const { return Bits(15, 12); }
   inline int PField() const { return Bit(24); }
   inline int UField() const { return Bit(23); }
+  inline int Opc1Field() const { return (Bit(23) << 2) | Bits(21, 20); }
+  inline int Opc2Field() const { return Bits(19, 16); }
+  inline int Opc3Field() const { return Bits(7, 6); }
+  inline int SzField() const { return Bit(8); }
+  inline int VLField() const { return Bit(20); }
+  inline int VCField() const { return Bit(8); }
+  inline int VAField() const { return Bits(23, 21); }
+  inline int VBField() const { return Bits(6, 5); }

   // Fields used in Data processing instructions
   inline Opcode OpcodeField() const {
@@ -344,7 +355,12 @@ class Registers {
 class VFPRegisters {
  public:
   // Return the name of the register.
-  static const char* Name(int reg);
+  static const char* Name(int reg, bool is_double);
+
+  // Lookup the register number for the name provided.
+  // Set the flag pointed to by is_double to true if the
+  // register is double-precision.
+  static int Number(const char* name, bool* is_double);

  private:
   static const char* names_[kNumVFPRegisters];
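
The new Opc1/Opc2/Opc3/Sz accessors are plain bit-slices of the 32-bit instruction word; Opc1 glues bit 23 on top of bits 21..20. A self-contained illustration decoding one known VFP instruction (0xee300b00 is the standard ARM encoding of vadd.f64 d0, d0, d0, not something taken from this patch):

#include <cstdio>
#include <stdint.h>

struct Instr {
  uint32_t bits;
  int Bit(int n) const { return (bits >> n) & 1; }
  int Bits(int hi, int lo) const {
    return (bits >> lo) & ((1u << (hi - lo + 1)) - 1);
  }
  // Opc1 combines bit 23 with bits 21..20 into a 3-bit field.
  int Opc1Field() const { return (Bit(23) << 2) | Bits(21, 20); }
  int Opc2Field() const { return Bits(19, 16); }
  int Opc3Field() const { return Bits(7, 6); }
  int SzField() const { return Bit(8); }
};

int main() {
  Instr i = { 0xee300b00u };  // vadd.f64 d0, d0, d0 (cond = AL)
  // Prints opc1=3 opc2=0 opc3=0 sz=1: the vadd/vsub group, double precision.
  printf("opc1=%d opc2=%d opc3=%d sz=%d\n",
         i.Opc1Field(), i.Opc2Field(), i.Opc3Field(), i.SzField());
}
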

deps/v8/src/arm/cpu-arm.cc | 2

@@ -122,7 +122,7 @@ void CPU::FlushICache(void* start, size_t size) {

 void CPU::DebugBreak() {
-#if !defined (__arm__)
+#if !defined (__arm__) || !defined(CAN_USE_ARMV5_INSTRUCTIONS)
   UNIMPLEMENTED();  // when building ARM emulator target
 #else
   asm volatile("bkpt 0");

deps/v8/src/arm/disasm-arm.cc | 242

@@ -129,6 +129,10 @@ class Decoder {
   void DecodeTypeVFP(Instr* instr);
   void DecodeType6CoprocessorIns(Instr* instr);

+  void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
+  void DecodeVCMP(Instr* instr);
+  void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
+  void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);

   const disasm::NameConverter& converter_;
   v8::internal::Vector<char> out_buffer_;
@@ -181,12 +185,12 @@ void Decoder::PrintRegister(int reg) {

 // Print the VFP S register name according to the active name converter.
 void Decoder::PrintSRegister(int reg) {
-  Print(assembler::arm::VFPRegisters::Name(reg));
+  Print(assembler::arm::VFPRegisters::Name(reg, false));
 }

 // Print the VFP D register name according to the active name converter.
 void Decoder::PrintDRegister(int reg) {
-  Print(assembler::arm::VFPRegisters::Name(reg + 32));
+  Print(assembler::arm::VFPRegisters::Name(reg, true));
 }
@@ -930,87 +934,151 @@ void Decoder::DecodeUnconditional(Instr* instr) {
 // VMRS
 void Decoder::DecodeTypeVFP(Instr* instr) {
   ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
+  ASSERT(instr->Bits(11, 9) == 0x5);

-  if (instr->Bit(23) == 1) {
-    if ((instr->Bits(21, 19) == 0x7) &&
-        (instr->Bits(18, 16) == 0x5) &&
-        (instr->Bits(11, 9) == 0x5) &&
-        (instr->Bit(8) == 1) &&
-        (instr->Bit(6) == 1) &&
-        (instr->Bit(4) == 0)) {
-      Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
-    } else if ((instr->Bits(21, 19) == 0x7) &&
-               (instr->Bits(18, 16) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 1) &&
-               (instr->Bit(7) == 1) &&
-               (instr->Bit(6) == 1) &&
-               (instr->Bit(4) == 0)) {
-      Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
-    } else if ((instr->Bit(21) == 0x0) &&
-               (instr->Bit(20) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 1) &&
-               (instr->Bit(6) == 0) &&
-               (instr->Bit(4) == 0)) {
-      Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
-    } else if ((instr->Bits(21, 20) == 0x3) &&
-               (instr->Bits(19, 16) == 0x4) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 0x1) &&
-               (instr->Bit(4) == 0x0)) {
-      Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
-    } else if ((instr->Bits(23, 20) == 0xF) &&
-               (instr->Bits(19, 16) == 0x1) &&
-               (instr->Bits(11, 8) == 0xA) &&
-               (instr->Bits(7, 5) == 0x0) &&
-               (instr->Bit(4) == 0x1) &&
-               (instr->Bits(3, 0) == 0x0)) {
-      if (instr->Bits(15, 12) == 0xF)
-        Format(instr, "vmrs'cond APSR, FPSCR");
-      else
-        Unknown(instr);  // Not used by V8.
-    } else {
-      Unknown(instr);  // Not used by V8.
-    }
-  } else if (instr->Bit(21) == 1) {
-    if ((instr->Bit(20) == 0x1) &&
-        (instr->Bits(11, 9) == 0x5) &&
-        (instr->Bit(8) == 0x1) &&
-        (instr->Bit(6) == 0) &&
-        (instr->Bit(4) == 0)) {
-      Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
-    } else if ((instr->Bit(20) == 0x1) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 1) &&
-               (instr->Bit(4) == 0)) {
-      Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
-    } else if ((instr->Bit(20) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 0) &&
-               (instr->Bit(4) == 0)) {
-      Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
-    } else {
-      Unknown(instr);  // Not used by V8.
-    }
-  } else {
-    if ((instr->Bit(20) == 0x0) &&
-        (instr->Bits(11, 8) == 0xA) &&
-        (instr->Bits(6, 5) == 0x0) &&
-        (instr->Bit(4) == 1) &&
-        (instr->Bits(3, 0) == 0x0)) {
-      Format(instr, "vmov'cond 'Sn, 'rt");
-    } else if ((instr->Bit(20) == 0x1) &&
-               (instr->Bits(11, 8) == 0xA) &&
-               (instr->Bits(6, 5) == 0x0) &&
-               (instr->Bit(4) == 1) &&
-               (instr->Bits(3, 0) == 0x0)) {
-      Format(instr, "vmov'cond 'rt, 'Sn");
-    } else {
-      Unknown(instr);  // Not used by V8.
-    }
-  }
-}
+  if (instr->Bit(4) == 0) {
+    if (instr->Opc1Field() == 0x7) {
+      // Other data processing instructions
+      if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+        DecodeVCVTBetweenDoubleAndSingle(instr);
+      } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
+        DecodeVCVTBetweenFloatingPointAndInteger(instr);
+      } else if (((instr->Opc2Field() >> 1) == 0x6) &&
+                 (instr->Opc3Field() & 0x1)) {
+        DecodeVCVTBetweenFloatingPointAndInteger(instr);
+      } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
+                 (instr->Opc3Field() & 0x1)) {
+        DecodeVCMP(instr);
+      } else {
+        Unknown(instr);  // Not used by V8.
+      }
+    } else if (instr->Opc1Field() == 0x3) {
+      if (instr->SzField() == 0x1) {
+        if (instr->Opc3Field() & 0x1) {
+          Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
+        } else {
+          Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
+        }
+      } else {
+        Unknown(instr);  // Not used by V8.
+      }
+    } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
+      if (instr->SzField() == 0x1) {
+        Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
+      } else {
+        Unknown(instr);  // Not used by V8.
+      }
+    } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
+      if (instr->SzField() == 0x1) {
+        Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
+      } else {
+        Unknown(instr);  // Not used by V8.
+      }
+    } else {
+      Unknown(instr);  // Not used by V8.
+    }
+  } else {
+    if ((instr->VCField() == 0x0) &&
+        (instr->VAField() == 0x0)) {
+      DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+    } else if ((instr->VLField() == 0x1) &&
+               (instr->VCField() == 0x0) &&
+               (instr->VAField() == 0x7) &&
+               (instr->Bits(19, 16) == 0x1)) {
+      if (instr->Bits(15, 12) == 0xF)
+        Format(instr, "vmrs'cond APSR, FPSCR");
+      else
+        Unknown(instr);  // Not used by V8.
+    } else {
+      Unknown(instr);  // Not used by V8.
+    }
+  }
+}
+
+
+void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
+  ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
+         (instr->VAField() == 0x0));
+
+  bool to_arm_register = (instr->VLField() == 0x1);
+
+  if (to_arm_register) {
+    Format(instr, "vmov'cond 'rt, 'Sn");
+  } else {
+    Format(instr, "vmov'cond 'Sn, 'rt");
+  }
+}
+
+
+void Decoder::DecodeVCMP(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
+         (instr->Opc3Field() & 0x1));
+
+  // Comparison.
+  bool dp_operation = (instr->SzField() == 1);
+  bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
+
+  if (dp_operation && !raise_exception_for_qnan) {
+    Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+  } else {
+    Unknown(instr);  // Not used by V8.
+  }
+}
+
+
+void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
+
+  bool double_to_single = (instr->SzField() == 1);
+
+  if (double_to_single) {
+    Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm");
+  } else {
+    Format(instr, "vcvt.f64.f32'cond 'Dd, 'Sm");
+  }
+}
+
+
+void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
+         (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
+
+  bool to_integer = (instr->Bit(18) == 1);
+  bool dp_operation = (instr->SzField() == 1);
+  if (to_integer) {
+    bool unsigned_integer = (instr->Bit(16) == 0);
+
+    if (dp_operation) {
+      if (unsigned_integer) {
+        Format(instr, "vcvt.u32.f64'cond 'Sd, 'Dm");
+      } else {
+        Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
+      }
+    } else {
+      if (unsigned_integer) {
+        Format(instr, "vcvt.u32.f32'cond 'Sd, 'Sm");
+      } else {
+        Format(instr, "vcvt.s32.f32'cond 'Sd, 'Sm");
+      }
+    }
+  } else {
+    bool unsigned_integer = (instr->Bit(7) == 0);
+
+    if (dp_operation) {
+      if (unsigned_integer) {
+        Format(instr, "vcvt.f64.u32'cond 'Dd, 'Sm");
+      } else {
+        Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
+      }
+    } else {
+      if (unsigned_integer) {
+        Format(instr, "vcvt.f32.u32'cond 'Sd, 'Sm");
+      } else {
+        Format(instr, "vcvt.f32.s32'cond 'Sd, 'Sm");
+      }
+    }
+  }
+}
@@ -1024,9 +1092,27 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
 void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
   ASSERT((instr->TypeField() == 6));

-  if (instr->CoprocessorField() != 0xB) {
-    Unknown(instr);  // Not used by V8.
-  } else {
+  if (instr->CoprocessorField() == 0xA) {
+    switch (instr->OpcodeField()) {
+      case 0x8:
+        if (instr->HasL()) {
+          Format(instr, "vldr'cond 'Sd, ['rn - 4*'off8]");
+        } else {
+          Format(instr, "vstr'cond 'Sd, ['rn - 4*'off8]");
+        }
+        break;
+      case 0xC:
+        if (instr->HasL()) {
+          Format(instr, "vldr'cond 'Sd, ['rn + 4*'off8]");
+        } else {
+          Format(instr, "vstr'cond 'Sd, ['rn + 4*'off8]");
+        }
+        break;
+      default:
+        Unknown(instr);  // Not used by V8.
+        break;
+    }
+  } else if (instr->CoprocessorField() == 0xB) {
     switch (instr->OpcodeField()) {
       case 0x2:
         // Load and store double to two GP registers
@@ -1056,6 +1142,8 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
         Unknown(instr);  // Not used by V8.
         break;
     }
+  } else {
+    UNIMPLEMENTED();  // Not used by V8.
   }
 }

deps/v8/src/arm/full-codegen-arm.cc | 8

@@ -667,14 +667,12 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   Comment cmnt(masm_, "[ FunctionLiteral");

   // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script(), this);
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(expr, script(), this);
   if (HasStackOverflow()) return;
-  ASSERT(boilerplate->IsBoilerplate());

   // Create a new closure.
-  __ mov(r0, Operand(boilerplate));
+  __ mov(r0, Operand(function_info));
   __ stm(db_w, sp, cp.bit() | r0.bit());
   __ CallRuntime(Runtime::kNewClosure, 2);
   Apply(context_, r0);

deps/v8/src/arm/ic-arm.cc | 724

@@ -42,7 +42,6 @@ namespace internal {
 #define __ ACCESS_MASM(masm)

 // Helper function used from LoadIC/CallIC GenerateNormal.
 static void GenerateDictionaryLoad(MacroAssembler* masm,
                                    Label* miss,
@@ -145,25 +144,6 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
 }

-// Helper function used to check that a value is either not an object
-// or is loaded if it is an object.
-static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm,
-                                           Label* miss,
-                                           Register value,
-                                           Register scratch) {
-  Label done;
-  // Check if the value is a Smi.
-  __ tst(value, Operand(kSmiTagMask));
-  __ b(eq, &done);
-  // Check if the object has been loaded.
-  __ ldr(scratch, FieldMemOperand(value, JSObject::kMapOffset));
-  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
-  __ tst(scratch, Operand(1 << Map::kNeedsLoading));
-  __ b(ne, miss);
-  __ bind(&done);
-}
-
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2    : name
@@ -292,12 +272,6 @@ static void GenerateNormalHelper(MacroAssembler* masm,
   __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
   __ b(ne, miss);

-  // Check that the function has been loaded.
-  __ ldr(r0, FieldMemOperand(r1, JSObject::kMapOffset));
-  __ ldrb(r0, FieldMemOperand(r0, Map::kBitField2Offset));
-  __ tst(r0, Operand(1 << Map::kNeedsLoading));
-  __ b(ne, miss);
-
   // Patch the receiver with the global proxy if necessary.
   if (is_global_object) {
     __ ldr(r0, MemOperand(sp, argc * kPointerSize));
@@ -469,7 +443,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
   __ bind(&probe);
   GenerateDictionaryLoad(masm, &miss, r1, r0);
-  GenerateCheckNonObjectOrLoaded(masm, &miss, r0, r1);
   __ Ret();

   // Global object access: Check access rights.
@@ -557,7 +530,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   //  -- sp[0] : key
   //  -- sp[4] : receiver
   // -----------------------------------
-  Label slow, fast;
+  Label slow, fast, check_pixel_array;

   // Get the key and receiver object from the stack.
   __ ldm(ia, sp, r0.bit() | r1.bit());
@@ -595,6 +568,19 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ cmp(r0, Operand(r3));
   __ b(lo, &fast);

+  // Check whether the elements is a pixel array.
+  __ bind(&check_pixel_array);
+  __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
+  __ cmp(r3, ip);
+  __ b(ne, &slow);
+  __ ldr(ip, FieldMemOperand(r1, PixelArray::kLengthOffset));
+  __ cmp(r0, ip);
+  __ b(hs, &slow);
+  __ ldr(ip, FieldMemOperand(r1, PixelArray::kExternalPointerOffset));
+  __ ldrb(r0, MemOperand(ip, r0));
+  __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Tag result as smi.
+  __ Ret();
+
   // Slow case: Push extra copies of the arguments (2).
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
@@ -625,10 +611,283 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
 }
// Convert unsigned integer with specified number of leading zeroes in binary
// representation to IEEE 754 double.
// Integer to convert is passed in register hiword.
// Resulting double is returned in registers hiword:loword.
// This function does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
Register hiword,
Register loword,
Register scratch,
int leading_zeroes) {
const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
const int mantissa_shift_for_hi_word =
meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
const int mantissa_shift_for_lo_word =
kBitsPerInt - mantissa_shift_for_hi_word;
__ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
if (mantissa_shift_for_hi_word > 0) {
__ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
__ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
} else {
__ mov(loword, Operand(0));
__ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
}
// If least significant bit of biased exponent was not 1 it was corrupted
// by most significant bit of mantissa so we should fix that.
if (!(biased_exponent & 1)) {
__ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
}
}
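
To make the bit manipulation above checkable, here is a host-side C++ model of GenerateUInt2Double using the same constants (exponent bias 1023, 20 mantissa bits in the top word); it illustrates the algorithm and is not code from the patch:

#include <cstdio>
#include <cstring>
#include <stdint.h>

static const int kBitsPerInt = 32;
static const int kExponentBias = 1023;
static const int kExponentShift = 20;          // exponent position in hi word
static const int kMantissaBitsInTopWord = 20;

void UInt2Double(uint32_t value, int leading_zeroes,
                 uint32_t* hi, uint32_t* lo) {
  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
  const int biased_exponent = kExponentBias + meaningful_bits;
  const int shift_hi = meaningful_bits - kMantissaBitsInTopWord;
  const int shift_lo = kBitsPerInt - shift_hi;
  uint32_t scratch = (uint32_t)biased_exponent << kExponentShift;
  if (shift_hi > 0) {
    *lo = value << shift_lo;
    *hi = scratch | (value >> shift_hi);
  } else {
    *lo = 0;
    *hi = scratch | (value << -shift_hi);
  }
  // The top set bit of the value (the implicit 1 of the mantissa) overlapped
  // the exponent's low bit; clear it again if that exponent bit should be 0.
  if (!(biased_exponent & 1)) *hi &= ~(1u << kExponentShift);
}

int main() {
  uint32_t hi, lo;
  UInt2Double(0x80000001u, 0, &hi, &lo);  // no leading zeros
  uint64_t bits = ((uint64_t)hi << 32) | lo;
  double d;
  memcpy(&d, &bits, sizeof(d));
  printf("%f\n", d);  // 2147483649.000000
}
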
 void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
                                         ExternalArrayType array_type) {
-  // TODO(476): port specialized code.
-  GenerateGeneric(masm);
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[4]  : receiver
+  // -----------------------------------
Label slow, failed_allocation;
// Get the key and receiver object from the stack.
__ ldm(ia, sp, r0.bit() | r1.bit());
// r0: key
// r1: receiver object
// Check that the object isn't a smi
__ BranchOnSmi(r1, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(r0, &slow);
// Check that the object is a JS object. Load map into r2.
__ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, &slow);
// Check that the receiver does not require access checks. We need
// to check this explicitly since this generic stub does not perform
// map checks.
__ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
__ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// r0: index (as a smi)
// r1: JSObject
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
__ cmp(r2, ip);
__ b(ne, &slow);
// Check that the index is in range.
__ ldr(ip, FieldMemOperand(r1, ExternalArray::kLengthOffset));
__ cmp(r1, Operand(r0, ASR, kSmiTagSize));
// Unsigned comparison catches both negative and too-large values.
__ b(lo, &slow);
// r0: index (smi)
// r1: elements array
__ ldr(r1, FieldMemOperand(r1, ExternalArray::kExternalPointerOffset));
// r1: base pointer of external storage
// We are not untagging the smi key; instead we work with it
// as if it were premultiplied by 2.
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
switch (array_type) {
case kExternalByteArray:
__ ldrsb(r0, MemOperand(r1, r0, LSR, 1));
break;
case kExternalUnsignedByteArray:
__ ldrb(r0, MemOperand(r1, r0, LSR, 1));
break;
case kExternalShortArray:
__ ldrsh(r0, MemOperand(r1, r0, LSL, 0));
break;
case kExternalUnsignedShortArray:
__ ldrh(r0, MemOperand(r1, r0, LSL, 0));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ ldr(r0, MemOperand(r1, r0, LSL, 1));
break;
case kExternalFloatArray:
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r0, r1, Operand(r0, LSL, 1));
__ vldr(s0, r0, 0);
} else {
__ ldr(r0, MemOperand(r1, r0, LSL, 1));
}
break;
default:
UNREACHABLE();
break;
}
// For integer array types:
// r0: value
// For floating-point array type
// s0: value (if VFP3 is supported)
// r0: value (if VFP3 is not supported)
if (array_type == kExternalIntArray) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
__ cmp(r0, Operand(0xC0000000));
__ b(mi, &box_int);
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
__ mov(r1, r0);
// Allocate a HeapNumber for the int and perform int-to-double
// conversion.
__ AllocateHeapNumber(r0, r3, r4, &slow);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r1);
__ vcvt_f64_s32(d0, s0);
__ sub(r1, r0, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ Ret();
} else {
WriteInt32ToHeapNumberStub stub(r1, r0, r3);
__ TailCallStub(&stub);
}
} else if (array_type == kExternalUnsignedIntArray) {
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
Label box_int, done;
__ tst(r0, Operand(0xC0000000));
__ b(ne, &box_int);
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int);
__ vmov(s0, r0);
__ AllocateHeapNumber(r0, r1, r2, &slow);
__ vcvt_f64_u32(d0, s0);
__ sub(r1, r0, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
Label box_int_0, box_int_1, done;
__ tst(r0, Operand(0x80000000));
__ b(ne, &box_int_0);
__ tst(r0, Operand(0x40000000));
__ b(ne, &box_int_1);
// Tag integer as smi and return it.
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ Ret();
__ bind(&box_int_0);
// Integer does not have leading zeros.
GenerateUInt2Double(masm, r0, r1, r2, 0);
__ b(&done);
__ bind(&box_int_1);
// Integer has one leading zero.
GenerateUInt2Double(masm, r0, r1, r2, 1);
__ bind(&done);
// Integer was converted to double in registers r0:r1.
// Wrap it into a HeapNumber.
__ AllocateHeapNumber(r2, r3, r5, &slow);
__ str(r0, FieldMemOperand(r2, HeapNumber::kExponentOffset));
__ str(r1, FieldMemOperand(r2, HeapNumber::kMantissaOffset));
__ mov(r0, r2);
__ Ret();
}
} else if (array_type == kExternalFloatArray) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ AllocateHeapNumber(r0, r1, r2, &slow);
__ vcvt_f64_f32(d0, s0);
__ sub(r1, r0, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
__ Ret();
} else {
__ AllocateHeapNumber(r3, r1, r2, &slow);
// VFP is not available, do manual single to double conversion.
// r0: floating point value (binary32)
// Extract mantissa to r1.
__ and_(r1, r0, Operand(kBinary32MantissaMask));
// Extract exponent to r2.
__ mov(r2, Operand(r0, LSR, kBinary32MantissaBits));
__ and_(r2, r2, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
Label exponent_rebiased;
__ teq(r2, Operand(0x00));
__ b(eq, &exponent_rebiased);
__ teq(r2, Operand(0xff));
__ mov(r2, Operand(0x7ff), LeaveCC, eq);
__ b(eq, &exponent_rebiased);
// Rebias exponent.
__ add(r2,
r2,
Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
__ bind(&exponent_rebiased);
__ and_(r0, r0, Operand(kBinary32SignMask));
__ orr(r0, r0, Operand(r2, LSL, HeapNumber::kMantissaBitsInTopWord));
// Shift mantissa.
static const int kMantissaShiftForHiWord =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaShiftForLoWord =
kBitsPerInt - kMantissaShiftForHiWord;
__ orr(r0, r0, Operand(r1, LSR, kMantissaShiftForHiWord));
__ mov(r1, Operand(r1, LSL, kMantissaShiftForLoWord));
__ str(r0, FieldMemOperand(r3, HeapNumber::kExponentOffset));
__ str(r1, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
__ mov(r0, r3);
__ Ret();
}
} else {
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ Ret();
}
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r0, r1);
GenerateRuntimeGetProperty(masm);
 }
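
The non-VFP branch above widens a binary32 to a binary64 by hand: rebias the exponent (127 to 1023, with 0 and 0xFF mapped to 0 and 0x7FF) and left-align the 23-bit mantissa in the 52-bit field. The same computation as host C++ (like the stub, subnormals are passed through without rescaling):

#include <cstdio>
#include <cstring>
#include <stdint.h>

uint64_t Binary32ToBinary64(uint32_t f) {
  uint64_t sign = (uint64_t)(f >> 31) << 63;
  uint32_t exp = (f >> 23) & 0xFF;
  uint64_t mantissa = f & 0x007FFFFF;
  uint64_t exp64;
  if (exp == 0) {
    exp64 = 0;                 // zeros (subnormals are not rescaled)
  } else if (exp == 0xFF) {
    exp64 = 0x7FF;             // infinities and NaNs
  } else {
    exp64 = exp - 127 + 1023;  // rebias
  }
  return sign | (exp64 << 52) | (mantissa << (52 - 23));
}

int main() {
  float f = 1.5f;
  uint32_t fbits; memcpy(&fbits, &f, sizeof(fbits));
  uint64_t dbits = Binary32ToBinary64(fbits);
  double d; memcpy(&d, &dbits, sizeof(d));
  printf("%f\n", d);  // 1.500000
}
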
@@ -709,7 +968,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   //  -- sp[0] : key
   //  -- sp[1] : receiver
   // -----------------------------------
-  Label slow, fast, array, extra, exit;
+  Label slow, fast, array, extra, exit, check_pixel_array;

   // Get the key and the object from the stack.
   __ ldm(ia, sp, r1.bit() | r3.bit());  // r1 = key, r3 = receiver
@@ -742,7 +1001,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
   __ cmp(r2, ip);
-  __ b(ne, &slow);
+  __ b(ne, &check_pixel_array);
   // Untag the key (for checking against untagged length in the fixed array).
   __ mov(r1, Operand(r1, ASR, kSmiTagSize));
   // Compute address to store into and check array bounds.
@@ -757,6 +1016,37 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ bind(&slow);
   GenerateRuntimeSetProperty(masm);
// Check whether the elements is a pixel array.
// r0: value
// r1: index (as a smi), zero-extended.
// r3: elements array
__ bind(&check_pixel_array);
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(r2, ip);
__ b(ne, &slow);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
__ BranchOnNotSmi(r0, &slow);
__ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the key.
__ ldr(ip, FieldMemOperand(r3, PixelArray::kLengthOffset));
__ cmp(r1, Operand(ip));
__ b(hs, &slow);
__ mov(r4, r0); // Save the value.
__ mov(r0, Operand(r0, ASR, kSmiTagSize)); // Untag the value.
{ // Clamp the value to [0..255].
Label done;
__ tst(r0, Operand(0xFFFFFF00));
__ b(eq, &done);
__ mov(r0, Operand(0), LeaveCC, mi); // 0 if negative.
__ mov(r0, Operand(255), LeaveCC, pl); // 255 if positive.
__ bind(&done);
}
__ ldr(r2, FieldMemOperand(r3, PixelArray::kExternalPointerOffset));
__ strb(r0, MemOperand(r2, r1));
__ mov(r0, Operand(r4)); // Return the original value.
__ Ret();
   // Extra capacity case: Check if there is extra capacity to
   // perform the store and update the length. Used for adding one
   // element to the array by writing to array[array.length].
@@ -819,10 +1109,376 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
 }
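
The conditional moves in the pixel-array store above clamp the untagged value to [0..255] without branches: the tst sets N from the value's sign bit, so mi selects 0 and pl selects 255. Equivalent C++ logic, for illustration:

#include <cstdio>

int ClampToByte(int value) {
  if ((value & ~0xFF) == 0) return value;  // already in [0..255]
  return value < 0 ? 0 : 255;              // mi -> 0, pl -> 255
}

int main() {
  printf("%d %d %d\n", ClampToByte(-5), ClampToByte(100), ClampToByte(300));
  // prints: 0 100 255
}
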
// Convert the int passed in register ival to an IEEE 754 single precision
// floating point value and store it into register fval.
// If VFP3 is available use it for the conversion.
static void ConvertIntToFloat(MacroAssembler* masm,
Register ival,
Register fval,
Register scratch1,
Register scratch2) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, ival);
__ vcvt_f32_s32(s0, s0);
__ vmov(fval, s0);
} else {
Label not_special, done;
// Move the sign bit from the source to the destination. This works because
// the sign bit of the binary32 value has the same position and polarity as
// the 2's complement sign bit of a Smi.
ASSERT(kBinary32SignMask == 0x80000000u);
__ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
// Negate value if it is negative.
__ rsb(ival, ival, Operand(0), LeaveCC, ne);
// We have -1, 0 or 1, which we treat specially. Register ival contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
// greater than 1 (not a special case) or less than 1 (special case of 0).
__ cmp(ival, Operand(1));
__ b(gt, &not_special);
// For 1 or -1 we need to or in the 0 exponent (biased).
static const uint32_t exponent_word_for_1 =
kBinary32ExponentBias << kBinary32ExponentShift;
__ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
__ b(&done);
__ bind(&not_special);
// Count leading zeros.
// Gets the wrong answer for 0, but we already checked for that case above.
Register zeros = scratch2;
__ CountLeadingZeros(ival, scratch1, zeros);
// Compute exponent and or it into the exponent register.
__ rsb(scratch1,
zeros,
Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
__ orr(fval,
fval,
Operand(scratch1, LSL, kBinary32ExponentShift));
// Shift up the source chopping the top bit off.
__ add(zeros, zeros, Operand(1));
// This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
__ mov(ival, Operand(ival, LSL, zeros));
// Or in the top of the mantissa (the top 23 bits).
__ orr(fval,
fval,
Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
__ bind(&done);
}
}
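
A host-side model of the non-VFP ConvertIntToFloat path: take the sign, normalize the magnitude with a leading-zero count, then assemble exponent and mantissa. Illustrative only; it uses the GCC/Clang __builtin_clz where the stub uses CountLeadingZeros:

#include <cstdio>
#include <cstring>
#include <stdint.h>

uint32_t IntToBinary32(int32_t ival) {
  uint32_t fval = (uint32_t)ival & 0x80000000u;    // sign bit
  uint32_t mag = ival < 0 ? -(uint32_t)ival : (uint32_t)ival;
  if (mag == 0) return fval;                       // +/-0
  if (mag == 1) return fval | (127u << 23);        // exponent for 1.0
  int zeros = __builtin_clz(mag);                  // GCC/Clang builtin
  uint32_t exponent = (31 - zeros) + 127;
  fval |= exponent << 23;
  mag <<= zeros + 1;                               // drop the implicit 1
  fval |= mag >> (32 - 23);                        // top 23 mantissa bits
  return fval;
}

int main() {
  uint32_t bits = IntToBinary32(-42);
  float f; memcpy(&f, &bits, sizeof(f));
  printf("%f\n", f);  // -42.000000
}
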
static bool IsElementTypeSigned(ExternalArrayType array_type) {
switch (array_type) {
case kExternalByteArray:
case kExternalShortArray:
case kExternalIntArray:
return true;
case kExternalUnsignedByteArray:
case kExternalUnsignedShortArray:
case kExternalUnsignedIntArray:
return false;
default:
UNREACHABLE();
return false;
}
}
 void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
                                          ExternalArrayType array_type) {
-  // TODO(476): port specialized code.
-  GenerateGeneric(masm);
+  // ---------- S t a t e --------------
+  //  -- r0     : value
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[1]  : receiver
+  // -----------------------------------
Label slow, check_heap_number;
// Get the key and the object from the stack.
__ ldm(ia, sp, r1.bit() | r2.bit()); // r1 = key, r2 = receiver
// Check that the object isn't a smi.
__ BranchOnSmi(r2, &slow);
// Check that the object is a JS object. Load map into r3
__ CompareObjectType(r2, r3, r4, FIRST_JS_OBJECT_TYPE);
__ b(le, &slow);
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(r1, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// r0: value
// r1: index (smi)
// r2: object
__ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
__ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
__ cmp(r3, ip);
__ b(ne, &slow);
// Check that the index is in range.
__ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the index.
__ ldr(ip, FieldMemOperand(r2, ExternalArray::kLengthOffset));
__ cmp(r1, ip);
// Unsigned comparison catches both negative and too-large values.
__ b(hs, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// r0: value
// r1: index (integer)
// r2: array
__ BranchOnNotSmi(r0, &check_heap_number);
__ mov(r3, Operand(r0, ASR, kSmiTagSize)); // Untag the value.
__ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
// r1: index (integer)
// r2: base pointer of external storage
// r3: value (integer)
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r3, MemOperand(r2, r1, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r3, MemOperand(r2, r1, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r3, MemOperand(r2, r1, LSL, 2));
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
ConvertIntToFloat(masm, r3, r4, r5, r6);
__ str(r4, MemOperand(r2, r1, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
// r0: value
__ Ret();
// r0: value
// r1: index (integer)
// r2: external array object
__ bind(&check_heap_number);
__ CompareObjectType(r0, r3, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
__ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// vldr requires the offset to be a multiple of 4, so we cannot
// fold -kHeapObjectTag into it.
__ sub(r3, r0, Operand(kHeapObjectTag));
__ vldr(d0, r3, HeapNumber::kValueOffset);
if (array_type == kExternalFloatArray) {
__ vcvt_f32_f64(s0, d0);
__ vmov(r3, s0);
__ str(r3, MemOperand(r2, r1, LSL, 2));
} else {
Label done;
// Need to perform float-to-int conversion.
// Test for NaN.
__ vcmp(d0, d0);
// Move vector status bits to normal status bits.
__ vmrs(v8::internal::pc);
__ mov(r3, Operand(0), LeaveCC, vs); // NaN converts to 0
__ b(vs, &done);
// Test whether exponent equal to 0x7FF (infinity or NaN)
__ vmov(r4, r3, d0);
__ mov(r5, Operand(0x7FF00000));
__ and_(r3, r3, Operand(r5));
__ teq(r3, Operand(r5));
__ mov(r3, Operand(0), LeaveCC, eq);
// Not infinity or NaN simply convert to int
if (IsElementTypeSigned(array_type)) {
__ vcvt_s32_f64(s0, d0, ne);
} else {
__ vcvt_u32_f64(s0, d0, ne);
}
__ vmov(r3, s0, ne);
__ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r3, MemOperand(r2, r1, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r3, MemOperand(r2, r1, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r3, MemOperand(r2, r1, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
}
// r0: original value
__ Ret();
} else {
// VFP3 is not available, do manual conversions.
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ ldr(r4, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
if (array_type == kExternalFloatArray) {
Label done, nan_or_infinity_or_zero;
static const int kMantissaInHiWordShift =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaInLoWordShift =
kBitsPerInt - kMantissaInHiWordShift;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r5, Operand(HeapNumber::kExponentMask));
__ and_(r6, r3, Operand(r5), SetCC);
__ b(eq, &nan_or_infinity_or_zero);
__ teq(r6, Operand(r5));
__ mov(r6, Operand(kBinary32ExponentMask), LeaveCC, eq);
__ b(eq, &nan_or_infinity_or_zero);
// Rebias exponent.
__ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
__ add(r6,
r6,
Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
__ cmp(r6, Operand(kBinary32MaxExponent));
__ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, gt);
__ orr(r3, r3, Operand(kBinary32ExponentMask), LeaveCC, gt);
__ b(gt, &done);
__ cmp(r6, Operand(kBinary32MinExponent));
__ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, lt);
__ b(lt, &done);
__ and_(r7, r3, Operand(HeapNumber::kSignMask));
__ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
__ orr(r7, r7, Operand(r3, LSL, kMantissaInHiWordShift));
__ orr(r7, r7, Operand(r4, LSR, kMantissaInLoWordShift));
__ orr(r3, r7, Operand(r6, LSL, kBinary32ExponentShift));
__ bind(&done);
__ str(r3, MemOperand(r2, r1, LSL, 2));
__ Ret();
__ bind(&nan_or_infinity_or_zero);
__ and_(r7, r3, Operand(HeapNumber::kSignMask));
__ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
__ orr(r6, r6, r7);
__ orr(r6, r6, Operand(r3, LSL, kMantissaInHiWordShift));
__ orr(r3, r6, Operand(r4, LSR, kMantissaInLoWordShift));
__ b(&done);
} else {
bool is_signed_type = IsElementTypeSigned(array_type);
int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
Label done, sign;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r5, Operand(HeapNumber::kExponentMask));
__ and_(r6, r3, Operand(r5), SetCC);
__ mov(r3, Operand(0), LeaveCC, eq);
__ b(eq, &done);
__ teq(r6, Operand(r5));
__ mov(r3, Operand(0), LeaveCC, eq);
__ b(eq, &done);
// Unbias exponent.
__ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
__ sub(r6, r6, Operand(HeapNumber::kExponentBias), SetCC);
// If the exponent is negative, the result is 0.
__ mov(r3, Operand(0), LeaveCC, mi);
__ b(mi, &done);
// If the exponent is too big, the result is the minimal value.
__ cmp(r6, Operand(meaningfull_bits - 1));
__ mov(r3, Operand(min_value), LeaveCC, ge);
__ b(ge, &done);
__ and_(r5, r3, Operand(HeapNumber::kSignMask), SetCC);
__ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
__ orr(r3, r3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
__ rsb(r6, r6, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
__ mov(r3, Operand(r3, LSR, r6), LeaveCC, pl);
__ b(pl, &sign);
__ rsb(r6, r6, Operand(0));
__ mov(r3, Operand(r3, LSL, r6));
__ rsb(r6, r6, Operand(meaningfull_bits));
__ orr(r3, r3, Operand(r4, LSR, r6));
__ bind(&sign);
__ teq(r5, Operand(0));
__ rsb(r3, r3, Operand(0), LeaveCC, ne);
__ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r3, MemOperand(r2, r1, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r3, MemOperand(r2, r1, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r3, MemOperand(r2, r1, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
}
}
// Slow case: call runtime.
__ bind(&slow);
GenerateRuntimeSetProperty(masm);
 }
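
Taken together, the rules this store stub implements for integer external arrays are: NaN and +/-Infinity become 0, values whose exponent is too large saturate to the minimal value, and everything else truncates toward zero. A compact C++ model of the signed case (the VFP path saturates slightly differently, so treat this as an approximation of the manual path):

#include <cstdio>
#include <cmath>
#include <stdint.h>

int32_t DoubleToInt32ForStore(double d) {
  if (std::isnan(d) || std::isinf(d)) return 0;    // NaN/Inf -> 0
  if (d >= 2147483648.0 || d < -2147483648.0) {
    return INT32_MIN;                              // min_value on overflow
  }
  return (int32_t)d;  // the C++ cast truncates toward zero, like the stub
}

int main() {
  printf("%d %d %d\n",
         DoubleToInt32ForStore(3.9),            // 3
         DoubleToInt32ForStore(std::nan("")),   // 0
         DoubleToInt32ForStore(1e12));          // INT32_MIN
}
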

deps/v8/src/arm/jump-target-arm.cc | 31

@@ -173,14 +173,7 @@ void BreakTarget::Jump() {
 void BreakTarget::Jump(Result* arg) {
-  // On ARM we do not currently emit merge code for jumps, so we need to do
-  // it explicitly here.  The only merging necessary is to drop extra
-  // statement state from the stack.
-  ASSERT(cgen()->has_valid_frame());
-  int count = cgen()->frame()->height() - expected_height_;
-  cgen()->frame()->Drop(count);
-  cgen()->frame()->Push(arg);
-  DoJump();
+  UNIMPLEMENTED();
 }
@@ -209,27 +202,7 @@ void BreakTarget::Bind() {
 void BreakTarget::Bind(Result* arg) {
-#ifdef DEBUG
-  // All the forward-reaching frames should have been adjusted at the
-  // jumps to this target.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    ASSERT(reaching_frames_[i] == NULL ||
-           reaching_frames_[i]->height() == expected_height_ + 1);
-  }
-#endif
-  // Drop leftover statement state from the frame before merging, even
-  // on the fall through.  This is so we can bind the return target
-  // with state on the frame.
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    // On ARM we do not currently emit merge code at binding sites, so we need
-    // to do it explicitly here.  The only merging necessary is to drop extra
-    // statement state from the stack.
-    cgen()->frame()->ForgetElements(count);
-    cgen()->frame()->Push(arg);
-  }
-  DoBind();
-  *arg = cgen()->frame()->Pop();
+  UNIMPLEMENTED();
 }

deps/v8/src/arm/macro-assembler-arm.cc | 54

@@ -1192,7 +1192,7 @@ void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
   // ARMv7 VFP3 instructions to implement integer to double conversion.
   mov(r7, Operand(inReg, ASR, kSmiTagSize));
   vmov(s15, r7);
-  vcvt(d7, s15);
+  vcvt_f64_s32(d7, s15);
   vmov(outLowReg, outHighReg, d7);
 }
@@ -1455,6 +1455,58 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
 }
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
result,
scratch1,
scratch2,
gc_required,
TAG_OBJECT);
// Get heap number map and store it in the allocated object.
LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
}
void MacroAssembler::CountLeadingZeros(Register source,
Register scratch,
Register zeros) {
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
clz(zeros, source); // This instruction is only supported after ARM5.
#else
mov(zeros, Operand(0));
mov(scratch, source);
// Top 16.
tst(scratch, Operand(0xffff0000));
add(zeros, zeros, Operand(16), LeaveCC, eq);
mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
// Top 8.
tst(scratch, Operand(0xff000000));
add(zeros, zeros, Operand(8), LeaveCC, eq);
mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
// Top 4.
tst(scratch, Operand(0xf0000000));
add(zeros, zeros, Operand(4), LeaveCC, eq);
mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
// Top 2.
tst(scratch, Operand(0xc0000000));
add(zeros, zeros, Operand(2), LeaveCC, eq);
mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
// Top bit.
tst(scratch, Operand(0x80000000u));
add(zeros, zeros, Operand(1), LeaveCC, eq);
#endif
}
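
The pre-ARMv5 fallback above is a branch-free binary search for the highest set bit, narrowing in 16/8/4/2/1 steps. The same algorithm in plain C++ (and, like the macro assembler version, it answers 31 rather than 32 for an input of 0):

#include <cstdio>
#include <stdint.h>

int CountLeadingZeros(uint32_t x) {
  int zeros = 0;
  if ((x & 0xFFFF0000u) == 0) { zeros += 16; x <<= 16; }  // top 16
  if ((x & 0xFF000000u) == 0) { zeros += 8;  x <<= 8;  }  // top 8
  if ((x & 0xF0000000u) == 0) { zeros += 4;  x <<= 4;  }  // top 4
  if ((x & 0xC0000000u) == 0) { zeros += 2;  x <<= 2;  }  // top 2
  if ((x & 0x80000000u) == 0) { zeros += 1; }             // top bit
  return zeros;
}

int main() {
  printf("%d %d %d\n",
         CountLeadingZeros(1),            // 31
         CountLeadingZeros(0x80000000u),  // 0
         CountLeadingZeros(42));          // 26
}
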
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Register first, Register first,
Register second, Register second,

deps/v8/src/arm/macro-assembler-arm.h | 12

@@ -239,6 +239,12 @@ class MacroAssembler: public Assembler {
                       Register scratch2,
                       Label* gc_required);

+  // Allocates a heap number or jumps to the gc_required label if the young
+  // space is full and a scavenge is needed.
+  void AllocateHeapNumber(Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Label* gc_required);
+
   // ---------------------------------------------------------------------------
   // Support functions.
@@ -319,6 +325,12 @@ class MacroAssembler: public Assembler {
                                            Register outHighReg,
                                            Register outLowReg);

+  // Count leading zeros in a 32 bit word. On ARMv5 and later it uses the clz
+  // instruction. On pre-ARMv5 hardware this routine gives the wrong answer
+  // for 0 (31 instead of 32).
+  void CountLeadingZeros(Register source,
+                         Register scratch,
+                         Register zeros);
+
   // ---------------------------------------------------------------------------
   // Runtime calls

deps/v8/src/arm/simulator-arm.cc | 346

@@ -72,6 +72,8 @@ class Debugger {
   int32_t GetRegisterValue(int regnum);
   bool GetValue(const char* desc, int32_t* value);
+  bool GetVFPSingleValue(const char* desc, float* value);
+  bool GetVFPDoubleValue(const char* desc, double* value);

   // Set or delete a breakpoint. Returns true if successful.
   bool SetBreakpoint(Instr* breakpc);
@@ -154,6 +156,28 @@ bool Debugger::GetValue(const char* desc, int32_t* value) {
 }

+bool Debugger::GetVFPSingleValue(const char* desc, float* value) {
+  bool is_double;
+  int regnum = VFPRegisters::Number(desc, &is_double);
+  if (regnum != kNoRegister && !is_double) {
+    *value = sim_->get_float_from_s_register(regnum);
+    return true;
+  }
+  return false;
+}
+
+bool Debugger::GetVFPDoubleValue(const char* desc, double* value) {
+  bool is_double;
+  int regnum = VFPRegisters::Number(desc, &is_double);
+  if (regnum != kNoRegister && is_double) {
+    *value = sim_->get_double_from_d_register(regnum);
+    return true;
+  }
+  return false;
+}
+
 bool Debugger::SetBreakpoint(Instr* breakpc) {
   // Check if a breakpoint can be set. If not return without any side-effects.
   if (sim_->break_pc_ != NULL) {
@@ -249,6 +273,8 @@ void Debugger::Debug() {
       } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
         if (args == 2) {
           int32_t value;
+          float svalue;
+          double dvalue;
           if (strcmp(arg1, "all") == 0) {
             for (int i = 0; i < kNumRegisters; i++) {
               value = GetRegisterValue(i);
@@ -257,6 +283,10 @@ void Debugger::Debug() {
           } else {
             if (GetValue(arg1, &value)) {
               PrintF("%s: 0x%08x %d \n", arg1, value, value);
+            } else if (GetVFPSingleValue(arg1, &svalue)) {
+              PrintF("%s: %f \n", arg1, svalue);
+            } else if (GetVFPDoubleValue(arg1, &dvalue)) {
+              PrintF("%s: %lf \n", arg1, dvalue);
             } else {
               PrintF("%s unrecognized\n", arg1);
             }
@@ -1919,6 +1949,13 @@ void Simulator::DecodeUnconditional(Instr* instr) {
 }
// Depending on the value of the last_bit flag, glue together a register code
// from the vm and m values (where m is expected to be a single bit).
static int GlueRegCode(bool last_bit, int vm, int m) {
return last_bit ? ((vm << 1) | m) : ((m << 4) | vm);
}
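
VFP encodings split a register number across a 4-bit field plus one extra bit; for singles the extra bit is the low bit, for doubles it sits on top. A small demonstration of GlueRegCode's two modes (the d17 case is only meaningful on VFPv3-class register files and is shown purely for the arithmetic):

#include <cstdio>

static int GlueRegCode(bool last_bit, int vm, int m) {
  return last_bit ? ((vm << 1) | m) : ((m << 4) | vm);
}

int main() {
  // s21 = 0b10101: Vm = 0b1010 and M = 1 as the LAST (lowest) bit.
  printf("s%d\n", GlueRegCode(true, 0b1010, 1));   // s21
  // With M glued on top instead: (1 << 4) | 1 = 17.
  printf("d%d\n", GlueRegCode(false, 0b0001, 1));  // d17
}
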
// void Simulator::DecodeTypeVFP(Instr* instr) // void Simulator::DecodeTypeVFP(Instr* instr)
// The Following ARMv7 VFPv instructions are currently supported. // The Following ARMv7 VFPv instructions are currently supported.
// vmov :Sn = Rt // vmov :Sn = Rt
@ -1933,114 +1970,212 @@ void Simulator::DecodeUnconditional(Instr* instr) {
// VMRS // VMRS
void Simulator::DecodeTypeVFP(Instr* instr) { void Simulator::DecodeTypeVFP(Instr* instr) {
ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) ); ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
int rt = instr->RtField();
int vm = instr->VmField(); int vm = instr->VmField();
int vn = instr->VnField();
int vd = instr->VdField(); int vd = instr->VdField();
int vn = instr->VnField();
-  if (instr->Bit(23) == 1) {
-    if ((instr->Bits(21, 19) == 0x7) &&
-        (instr->Bits(18, 16) == 0x5) &&
-        (instr->Bits(11, 9) == 0x5) &&
-        (instr->Bit(8) == 1) &&
-        (instr->Bit(6) == 1) &&
-        (instr->Bit(4) == 0)) {
-      double dm_val = get_double_from_d_register(vm);
-      int32_t int_value = static_cast<int32_t>(dm_val);
-      set_s_register_from_sinteger(((vd<<1) | instr->DField()), int_value);
-    } else if ((instr->Bits(21, 19) == 0x7) &&
-               (instr->Bits(18, 16) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 1) &&
-               (instr->Bit(7) == 1) &&
-               (instr->Bit(6) == 1) &&
-               (instr->Bit(4) == 0)) {
-      int32_t int_value = get_sinteger_from_s_register(((vm<<1) |
-                                                       instr->MField()));
-      double dbl_value = static_cast<double>(int_value);
-      set_d_register_from_double(vd, dbl_value);
-    } else if ((instr->Bit(21) == 0x0) &&
-               (instr->Bit(20) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 1) &&
-               (instr->Bit(6) == 0) &&
-               (instr->Bit(4) == 0)) {
-      double dn_value = get_double_from_d_register(vn);
-      double dm_value = get_double_from_d_register(vm);
-      double dd_value = dn_value / dm_value;
-      set_d_register_from_double(vd, dd_value);
-    } else if ((instr->Bits(21, 20) == 0x3) &&
-               (instr->Bits(19, 16) == 0x4) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 0x1) &&
-               (instr->Bit(4) == 0x0)) {
-      double dd_value = get_double_from_d_register(vd);
-      double dm_value = get_double_from_d_register(vm);
-      Compute_FPSCR_Flags(dd_value, dm_value);
-    } else if ((instr->Bits(23, 20) == 0xF) &&
-               (instr->Bits(19, 16) == 0x1) &&
-               (instr->Bits(11, 8) == 0xA) &&
-               (instr->Bits(7, 5) == 0x0) &&
-               (instr->Bit(4) == 0x1) &&
-               (instr->Bits(3, 0) == 0x0)) {
-      if (instr->Bits(15, 12) == 0xF)
-        Copy_FPSCR_to_APSR();
-      else
-        UNIMPLEMENTED();  // Not used by V8.
-    } else {
-      UNIMPLEMENTED();  // Not used by V8.
-    }
-  } else if (instr->Bit(21) == 1) {
-    if ((instr->Bit(20) == 0x1) &&
-        (instr->Bits(11, 9) == 0x5) &&
-        (instr->Bit(8) == 0x1) &&
-        (instr->Bit(6) == 0) &&
-        (instr->Bit(4) == 0)) {
-      double dn_value = get_double_from_d_register(vn);
-      double dm_value = get_double_from_d_register(vm);
-      double dd_value = dn_value + dm_value;
-      set_d_register_from_double(vd, dd_value);
-    } else if ((instr->Bit(20) == 0x1) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 1) &&
-               (instr->Bit(4) == 0)) {
-      double dn_value = get_double_from_d_register(vn);
-      double dm_value = get_double_from_d_register(vm);
-      double dd_value = dn_value - dm_value;
-      set_d_register_from_double(vd, dd_value);
-    } else if ((instr->Bit(20) == 0x0) &&
-               (instr->Bits(11, 9) == 0x5) &&
-               (instr->Bit(8) == 0x1) &&
-               (instr->Bit(6) == 0) &&
-               (instr->Bit(4) == 0)) {
-      double dn_value = get_double_from_d_register(vn);
-      double dm_value = get_double_from_d_register(vm);
-      double dd_value = dn_value * dm_value;
-      set_d_register_from_double(vd, dd_value);
-    } else {
-      UNIMPLEMENTED();  // Not used by V8.
-    }
-  } else {
-    if ((instr->Bit(20) == 0x0) &&
-        (instr->Bits(11, 8) == 0xA) &&
-        (instr->Bits(6, 5) == 0x0) &&
-        (instr->Bit(4) == 1) &&
-        (instr->Bits(3, 0) == 0x0)) {
-      int32_t rs_val = get_register(rt);
-      set_s_register_from_sinteger(((vn<<1) | instr->NField()), rs_val);
-    } else if ((instr->Bit(20) == 0x1) &&
-               (instr->Bits(11, 8) == 0xA) &&
-               (instr->Bits(6, 5) == 0x0) &&
-               (instr->Bit(4) == 1) &&
-               (instr->Bits(3, 0) == 0x0)) {
-      int32_t int_value = get_sinteger_from_s_register(((vn<<1) |
-                                                       instr->NField()));
-      set_register(rt, int_value);
-    } else {
-      UNIMPLEMENTED();  // Not used by V8.
-    }
-  }
-}
+  if (instr->Bit(4) == 0) {
+    if (instr->Opc1Field() == 0x7) {
+      // Other data processing instructions
+      if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+        DecodeVCVTBetweenDoubleAndSingle(instr);
+      } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
+        DecodeVCVTBetweenFloatingPointAndInteger(instr);
+      } else if (((instr->Opc2Field() >> 1) == 0x6) &&
+                 (instr->Opc3Field() & 0x1)) {
+        DecodeVCVTBetweenFloatingPointAndInteger(instr);
+      } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
+                 (instr->Opc3Field() & 0x1)) {
+        DecodeVCMP(instr);
+      } else {
+        UNREACHABLE();  // Not used by V8.
+      }
+    } else if (instr->Opc1Field() == 0x3) {
+      if (instr->SzField() != 0x1) {
+        UNREACHABLE();  // Not used by V8.
+      }
+
+      if (instr->Opc3Field() & 0x1) {
+        // vsub
+        double dn_value = get_double_from_d_register(vn);
+        double dm_value = get_double_from_d_register(vm);
+        double dd_value = dn_value - dm_value;
+        set_d_register_from_double(vd, dd_value);
+      } else {
+        // vadd
+        double dn_value = get_double_from_d_register(vn);
+        double dm_value = get_double_from_d_register(vm);
+        double dd_value = dn_value + dm_value;
+        set_d_register_from_double(vd, dd_value);
+      }
+    } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
+      // vmul
+      if (instr->SzField() != 0x1) {
+        UNREACHABLE();  // Not used by V8.
+      }
+
+      double dn_value = get_double_from_d_register(vn);
+      double dm_value = get_double_from_d_register(vm);
+      double dd_value = dn_value * dm_value;
+      set_d_register_from_double(vd, dd_value);
+    } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
+      // vdiv
+      if (instr->SzField() != 0x1) {
+        UNREACHABLE();  // Not used by V8.
+      }
+
+      double dn_value = get_double_from_d_register(vn);
+      double dm_value = get_double_from_d_register(vm);
+      double dd_value = dn_value / dm_value;
+      set_d_register_from_double(vd, dd_value);
+    } else {
+      UNIMPLEMENTED();  // Not used by V8.
+    }
+  } else {
+    if ((instr->VCField() == 0x0) &&
+        (instr->VAField() == 0x0)) {
+      DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+    } else if ((instr->VLField() == 0x1) &&
+               (instr->VCField() == 0x0) &&
+               (instr->VAField() == 0x7) &&
+               (instr->Bits(19, 16) == 0x1)) {
+      // vmrs
+      if (instr->RtField() == 0xF)
+        Copy_FPSCR_to_APSR();
+      else
+        UNIMPLEMENTED();  // Not used by V8.
+    } else {
+      UNIMPLEMENTED();  // Not used by V8.
+    }
+  }
+}
+
+
+void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
+  ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
+         (instr->VAField() == 0x0));
+
+  int t = instr->RtField();
+  int n = GlueRegCode(true, instr->VnField(), instr->NField());
+  bool to_arm_register = (instr->VLField() == 0x1);
+
+  if (to_arm_register) {
+    int32_t int_value = get_sinteger_from_s_register(n);
+    set_register(t, int_value);
+  } else {
+    int32_t rs_val = get_register(t);
+    set_s_register_from_sinteger(n, rs_val);
+  }
+}
+
+
+void Simulator::DecodeVCMP(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
+         (instr->Opc3Field() & 0x1));
+
+  // Comparison.
+  bool dp_operation = (instr->SzField() == 1);
+
+  if (instr->Bit(7) != 0) {
+    // Raising exceptions for quiet NaNs are not supported.
+    UNIMPLEMENTED();  // Not used by V8.
+  }
+
+  int d = GlueRegCode(!dp_operation, instr->VdField(), instr->DField());
+  int m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField());
+
+  if (dp_operation) {
+    double dd_value = get_double_from_d_register(d);
+    double dm_value = get_double_from_d_register(m);
+    Compute_FPSCR_Flags(dd_value, dm_value);
+  } else {
+    UNIMPLEMENTED();  // Not used by V8.
+  }
+}
+
+
+void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
+
+  bool double_to_single = (instr->SzField() == 1);
+  int dst = GlueRegCode(double_to_single, instr->VdField(), instr->DField());
+  int src = GlueRegCode(!double_to_single, instr->VmField(), instr->MField());
+
+  if (double_to_single) {
+    double val = get_double_from_d_register(src);
+    set_s_register_from_float(dst, static_cast<float>(val));
+  } else {
+    float val = get_float_from_s_register(src);
+    set_d_register_from_double(dst, static_cast<double>(val));
+  }
+}
+
+
+void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
+  ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
+         (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
+
+  // Conversion between floating-point and integer.
+  int vd = instr->VdField();
+  int d = instr->DField();
+  int vm = instr->VmField();
+  int m = instr->MField();
+
+  bool to_integer = (instr->Bit(18) == 1);
+  bool dp_operation = (instr->SzField() == 1);
+
+  if (to_integer) {
+    bool unsigned_integer = (instr->Bit(16) == 0);
+    if (instr->Bit(7) != 1) {
+      // Only rounding towards zero supported.
+      UNIMPLEMENTED();  // Not used by V8.
+    }
+
+    int dst = GlueRegCode(true, vd, d);
+    int src = GlueRegCode(!dp_operation, vm, m);
+
+    if (dp_operation) {
+      double val = get_double_from_d_register(src);
+      int sint = unsigned_integer ? static_cast<uint32_t>(val) :
+                                    static_cast<int32_t>(val);
+      set_s_register_from_sinteger(dst, sint);
+    } else {
+      float val = get_float_from_s_register(src);
+      int sint = unsigned_integer ? static_cast<uint32_t>(val) :
+                                    static_cast<int32_t>(val);
+      set_s_register_from_sinteger(dst, sint);
+    }
+  } else {
+    bool unsigned_integer = (instr->Bit(7) == 0);
+
+    int dst = GlueRegCode(!dp_operation, vd, d);
+    int src = GlueRegCode(true, vm, m);
+
+    int val = get_sinteger_from_s_register(src);
+
+    if (dp_operation) {
+      if (unsigned_integer) {
+        set_d_register_from_double(dst,
+                                   static_cast<double>((uint32_t)val));
+      } else {
+        set_d_register_from_double(dst, static_cast<double>(val));
+      }
+    } else {
+      if (unsigned_integer) {
+        set_s_register_from_float(dst,
+                                  static_cast<float>((uint32_t)val));
+      } else {
+        set_s_register_from_float(dst, static_cast<float>(val));
+      }
+    }
+  }
+}
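Note: the new VFP helpers above lean on GlueRegCode, whose definition is outside this hunk. From the call sites, and from the `(vd<<1) | instr->DField()` pattern in the code being removed, its behavior is presumably the following minimal sketch (parameter names are assumed, not taken from this diff):

// Sketch only; the real definition lives elsewhere in simulator-arm.cc.
// Single-precision registers S0..S31 need five bits: a 4-bit field glued
// to one extra encoding bit (D/N/M). Double registers D0..D15 use the
// 4-bit field as-is.
static int GlueRegCode(bool glue_bit, int vreg, int extra_bit) {
  return glue_bit ? ((vreg << 1) | extra_bit) : vreg;
}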
@@ -2055,9 +2190,32 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
 void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
   ASSERT((instr->TypeField() == 6));
 
-  if (instr->CoprocessorField() != 0xB) {
-    UNIMPLEMENTED();  // Not used by V8.
-  } else {
+  if (instr->CoprocessorField() == 0xA) {
+    switch (instr->OpcodeField()) {
+      case 0x8:
+      case 0xC: {  // Load and store float to memory.
+        int rn = instr->RnField();
+        int vd = instr->VdField();
+        int offset = instr->Immed8Field();
+        if (!instr->HasU()) {
+          offset = -offset;
+        }
+
+        int32_t address = get_register(rn) + 4 * offset;
+        if (instr->HasL()) {
+          // Load double from memory: vldr.
+          set_s_register_from_sinteger(vd, ReadW(address, instr));
+        } else {
+          // Store double to memory: vstr.
+          WriteW(address, get_sinteger_from_s_register(vd), instr);
+        }
+        break;
+      }
+      default:
+        UNIMPLEMENTED();  // Not used by V8.
+        break;
+    }
+  } else if (instr->CoprocessorField() == 0xB) {
     switch (instr->OpcodeField()) {
       case 0x2:
         // Load and store double to two GP registers
@@ -2106,6 +2264,8 @@ void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
         UNIMPLEMENTED();  // Not used by V8.
         break;
     }
+  } else {
+    UNIMPLEMENTED();  // Not used by V8.
   }
 }
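Note: in the new coprocessor-0xA branch the 8-bit immediate is a word offset, scaled by 4 and applied with the sign given by the U bit. A worked example of that address computation, with assumed values that are not taken from this diff:

#include <cassert>
#include <cstdint>

int main() {
  int32_t base = 0x1000;   // contents of the base register rn
  int offset = 3;          // Immed8Field()
  bool has_u = false;      // HasU(): set means add, clear means subtract
  if (!has_u) offset = -offset;
  int32_t address = base + 4 * offset;  // the offset counts words, not bytes
  assert(address == 0x0FF4);            // 0x1000 - 12
  return 0;
}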

5  deps/v8/src/arm/simulator-arm.h

@@ -231,6 +231,11 @@ class Simulator {
   void DecodeTypeVFP(Instr* instr);
   void DecodeType6CoprocessorIns(Instr* instr);
 
+  void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
+  void DecodeVCMP(Instr* instr);
+  void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
+  void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
+
   // Executes one instruction.
   void InstructionDecode(Instr* instr);

21  deps/v8/src/arm/virtual-frame-arm.cc

@@ -47,16 +47,6 @@ void VirtualFrame::SyncElementByPushing(int index) {
 }
 
-void VirtualFrame::SyncRange(int begin, int end) {
-  // All elements are in memory on ARM (ie, synced).
-#ifdef DEBUG
-  for (int i = begin; i <= end; i++) {
-    ASSERT(elements_[i].is_synced());
-  }
-#endif
-}
-
 void VirtualFrame::MergeTo(VirtualFrame* expected) {
   // ARM frames are currently always in memory.
   ASSERT(Equals(expected));
@@ -270,12 +260,7 @@ void VirtualFrame::Drop(int count) {
   }
 
   // Discard elements from the virtual frame and free any registers.
-  for (int i = 0; i < count; i++) {
-    FrameElement dropped = elements_.RemoveLast();
-    if (dropped.is_register()) {
-      Unuse(dropped.reg());
-    }
-  }
+  element_count_ -= count;
 }
@@ -288,14 +273,14 @@ Result VirtualFrame::Pop() {
 void VirtualFrame::EmitPop(Register reg) {
   ASSERT(stack_pointer_ == element_count() - 1);
   stack_pointer_--;
-  elements_.RemoveLast();
+  element_count_--;
   __ pop(reg);
 }
 
 void VirtualFrame::EmitPush(Register reg) {
   ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
+  element_count_++;
   stack_pointer_++;
   __ push(reg);
 }
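Note: because every element of the ARM virtual frame is always memory-resident, the per-element ZoneList<FrameElement> carried no information beyond its length, which is why this change can reduce Drop, EmitPop, and EmitPush to counter updates. A minimal sketch of the resulting bookkeeping, stripped to the essentials rather than the full class:

// Sketch of the counter-based frame after this change (names follow the
// diff; the assembly emission is elided into comments).
class CountingFrame {
 public:
  int element_count() { return element_count_; }
  void EmitPush() { element_count_++; stack_pointer_++; /* __ push(reg); */ }
  void EmitPop()  { stack_pointer_--; element_count_--; /* __ pop(reg);  */ }
  void Drop(int count) {
    // Assumes the dropped elements sit at or below the hardware sp.
    stack_pointer_ -= count;
    element_count_ -= count;
  }
 private:
  int element_count_ = 0;   // replaces ZoneList<FrameElement> elements_
  int stack_pointer_ = -1;  // index of the element at the hardware sp
};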

67  deps/v8/src/arm/virtual-frame-arm.h

@@ -67,12 +67,8 @@ class VirtualFrame : public ZoneObject {
   CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
   MacroAssembler* masm() { return cgen()->masm(); }
 
-  // Create a duplicate of an existing valid frame element.
-  FrameElement CopyElementAt(int index,
-                             NumberInfo info = NumberInfo::Unknown());
-
   // The number of elements on the virtual frame.
-  int element_count() { return elements_.length(); }
+  int element_count() { return element_count_; }
 
   // The height of the virtual expression stack.
   int height() {
@@ -115,7 +111,7 @@ class VirtualFrame : public ZoneObject {
     stack_pointer_ -= count;
     // On ARM, all elements are in memory, so there is no extra bookkeeping
     // (registers, copies, etc.) beyond dropping the elements.
-    elements_.Rewind(stack_pointer_ + 1);
+    element_count_ -= count;
   }
 
   // Forget count elements from the top of the frame and adjust the stack
@@ -124,7 +120,7 @@ class VirtualFrame : public ZoneObject {
   void ForgetElements(int count);
 
   // Spill all values from the frame to memory.
-  void SpillAll();
+  inline void SpillAll();
 
   // Spill all occurrences of a specific register from the frame.
   void Spill(Register reg) {
@@ -179,7 +175,7 @@ class VirtualFrame : public ZoneObject {
   // dropping all non-locals elements in the virtual frame. This
   // avoids generating unnecessary merge code when jumping to the
   // shared return site. Emits code for spills.
-  void PrepareForReturn();
+  inline void PrepareForReturn();
 
   // Number of local variables after when we use a loop for allocating.
   static const int kLocalVarBound = 5;
@@ -205,10 +201,6 @@ class VirtualFrame : public ZoneObject {
     SetElementAt(index, &temp);
   }
 
-  void PushElementAt(int index) {
-    PushFrameSlotAt(element_count() - index - 1);
-  }
-
   // A frame-allocated local as an assembly operand.
   MemOperand LocalAt(int index) {
     ASSERT(0 <= index);
@@ -216,11 +208,6 @@ class VirtualFrame : public ZoneObject {
     return MemOperand(fp, kLocal0Offset - index * kPointerSize);
   }
 
-  // Push a copy of the value of a local frame slot on top of the frame.
-  void PushLocalAt(int index) {
-    PushFrameSlotAt(local0_index() + index);
-  }
-
   // Push the value of a local frame slot on top of the frame and invalidate
   // the local slot. The slot should be written to before trying to read
   // from it again.
@@ -228,21 +215,12 @@ class VirtualFrame : public ZoneObject {
     TakeFrameSlotAt(local0_index() + index);
   }
 
-  // Store the top value on the virtual frame into a local frame slot. The
-  // value is left in place on top of the frame.
-  void StoreToLocalAt(int index) {
-    StoreToFrameSlotAt(local0_index() + index);
-  }
-
   // Push the address of the receiver slot on the frame.
   void PushReceiverSlotAddress();
 
   // The function frame slot.
   MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
 
-  // Push the function on top of the frame.
-  void PushFunction() { PushFrameSlotAt(function_index()); }
-
   // The context frame slot.
   MemOperand Context() { return MemOperand(fp, kContextOffset); }
@@ -261,11 +239,6 @@ class VirtualFrame : public ZoneObject {
     return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
   }
 
-  // Push a copy of the value of a parameter frame slot on top of the frame.
-  void PushParameterAt(int index) {
-    PushFrameSlotAt(param0_index() + index);
-  }
-
   // Push the value of a paramter frame slot on top of the frame and
   // invalidate the parameter slot. The slot should be written to before
   // trying to read from it again.
@@ -323,9 +296,6 @@ class VirtualFrame : public ZoneObject {
   // Drop one element.
   void Drop() { Drop(1); }
 
-  // Duplicate the top element of the frame.
-  void Dup() { PushFrameSlotAt(element_count() - 1); }
-
   // Pop an element from the top of the expression stack. Returns a
   // Result, which may be a constant or a register.
   Result Pop();
@@ -344,28 +314,16 @@ class VirtualFrame : public ZoneObject {
   void EmitPushMultiple(int count, int src_regs);
 
   // Push an element on the virtual frame.
-  inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
   inline void Push(Handle<Object> value);
   inline void Push(Smi* value);
 
-  // Pushing a result invalidates it (its contents become owned by the frame).
-  void Push(Result* result) {
-    if (result->is_register()) {
-      Push(result->reg());
-    } else {
-      ASSERT(result->is_constant());
-      Push(result->handle());
-    }
-    result->Unuse();
-  }
-
   // Nip removes zero or more elements from immediately below the top
   // of the frame, leaving the previous top-of-frame value on top of
   // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
   inline void Nip(int num_dropped);
 
-  inline void SetTypeForLocalAt(int index, NumberInfo info);
-  inline void SetTypeForParamAt(int index, NumberInfo info);
+  inline void SetTypeForLocalAt(int index, TypeInfo info);
+  inline void SetTypeForParamAt(int index, TypeInfo info);
 
  private:
   static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
@@ -375,7 +333,8 @@ class VirtualFrame : public ZoneObject {
   static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
   static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
 
-  ZoneList<FrameElement> elements_;
+  // The number of elements on the stack frame.
+  int element_count_;
 
   // The index of the element that is at the processor's stack pointer
   // (the sp register).
@@ -449,19 +408,12 @@ class VirtualFrame : public ZoneObject {
   // Keep the element type as register or constant, and clear the dirty bit.
   void SyncElementAt(int index);
 
-  // Sync the range of elements in [begin, end] with memory.
-  void SyncRange(int begin, int end);
-
   // Sync a single unsynced element that lies beneath or at the stack pointer.
   void SyncElementBelowStackPointer(int index);
 
   // Sync a single unsynced element that lies just above the stack pointer.
   void SyncElementByPushing(int index);
 
-  // Push a copy of a frame slot (typically a local or parameter) on top of
-  // the frame.
-  inline void PushFrameSlotAt(int index);
-
   // Push a the value of a frame slot (typically a local or parameter) on
   // top of the frame and invalidate the slot.
   void TakeFrameSlotAt(int index);
@@ -505,9 +457,8 @@ class VirtualFrame : public ZoneObject {
   inline bool Equals(VirtualFrame* other);
 
-  // Classes that need raw access to the elements_ array.
-  friend class DeferredCode;
   friend class JumpTarget;
+  friend class DeferredCode;
 };

10  deps/v8/src/assembler.cc

@@ -664,6 +664,16 @@ ExternalReference ExternalReference::scheduled_exception_address() {
 }
 
+ExternalReference ExternalReference::compile_array_pop_call() {
+  return ExternalReference(FUNCTION_ADDR(CompileArrayPopCall));
+}
+
+
+ExternalReference ExternalReference::compile_array_push_call() {
+  return ExternalReference(FUNCTION_ADDR(CompileArrayPushCall));
+}
+
+
 #ifdef V8_NATIVE_REGEXP
 
 ExternalReference ExternalReference::re_check_stack_guard_state() {

3  deps/v8/src/assembler.h

@@ -443,6 +443,9 @@ class ExternalReference BASE_EMBEDDED {
   static ExternalReference scheduled_exception_address();
 
+  static ExternalReference compile_array_pop_call();
+  static ExternalReference compile_array_push_call();
+
   Address address() const {return reinterpret_cast<Address>(address_);}
 
 #ifdef ENABLE_DEBUGGER_SUPPORT

661  deps/v8/src/ast.cc

@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "ast.h"
+#include "data-flow.h"
 #include "parser.h"
 #include "scopes.h"
 #include "string-stream.h"
@@ -79,7 +80,8 @@ VariableProxy::VariableProxy(Handle<String> name,
       is_this_(is_this),
       inside_with_(inside_with),
       is_trivial_(false),
-      reaching_definitions_(NULL) {
+      reaching_definitions_(NULL),
+      is_primitive_(false) {
   // names must be canonicalized for fast equality checks
   ASSERT(name->IsSymbol());
 }
@@ -87,7 +89,8 @@ VariableProxy::VariableProxy(Handle<String> name,
 
 VariableProxy::VariableProxy(bool is_this)
     : is_this_(is_this),
-      reaching_definitions_(NULL) {
+      reaching_definitions_(NULL),
+      is_primitive_(false) {
 }
 
@@ -169,6 +172,72 @@ void TargetCollector::AddTarget(BreakTarget* target) {
 }
bool Expression::GuaranteedSmiResult() {
BinaryOperation* node = AsBinaryOperation();
if (node == NULL) return false;
Token::Value op = node->op();
switch (op) {
case Token::COMMA:
case Token::OR:
case Token::AND:
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD:
case Token::BIT_XOR:
case Token::SHL:
return false;
break;
case Token::BIT_OR:
case Token::BIT_AND: {
Literal* left = node->left()->AsLiteral();
Literal* right = node->right()->AsLiteral();
if (left != NULL && left->handle()->IsSmi()) {
int value = Smi::cast(*left->handle())->value();
if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
// Result of bitwise or is always a negative Smi.
return true;
}
if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
// Result of bitwise and is always a positive Smi.
return true;
}
}
if (right != NULL && right->handle()->IsSmi()) {
int value = Smi::cast(*right->handle())->value();
if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
// Result of bitwise or is always a negative Smi.
return true;
}
if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
// Result of bitwise and is always a positive Smi.
return true;
}
}
return false;
break;
}
case Token::SAR:
case Token::SHR: {
Literal* right = node->right()->AsLiteral();
if (right != NULL && right->handle()->IsSmi()) {
int value = Smi::cast(*right->handle())->value();
if ((value & 0x1F) > 1 ||
(op == Token::SAR && (value & 0x1F) == 1)) {
return true;
}
}
return false;
break;
}
default:
UNREACHABLE();
break;
}
return false;
}
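Note: the masks above encode the smi range directly. On 32-bit targets a smi payload is 31 bits, so a value is representable exactly when bits 31 and 30 agree; OR-ing with a literal whose top two bits are both set pins the result in the negative half of that range, and AND-ing with a literal whose top two bits are both clear pins it in the positive half. A small self-check of that reasoning (hypothetical driver, not V8 code):

#include <cassert>
#include <cstdint>

// A 32-bit smi stores a 31-bit signed payload: x fits iff bits 31 and 30
// of x agree, i.e. x is in [-2^30, 2^30 - 1].
static bool FitsSmi32(int32_t x) {
  return x >= -(1 << 30) && x < (1 << 30);
}

int main() {
  int32_t or_mask = static_cast<int32_t>(0xc0000005u);  // top two bits set
  int32_t and_mask = 0x3ffffff0;                        // top two bits clear
  for (int64_t i = -5; i <= 5; i++) {
    int32_t v = static_cast<int32_t>(i * 123456789);
    assert(FitsSmi32(v | or_mask));   // always a negative smi
    assert(FitsSmi32(v & and_mask));  // always a positive smi
  }
  return 0;
}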
 // ----------------------------------------------------------------------------
 // Implementation of AstVisitor
 
@@ -507,7 +576,7 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
 // The following expression types are never primitive because they express
 // Object values.
 bool FunctionLiteral::IsPrimitive() { return false; }
-bool FunctionBoilerplateLiteral::IsPrimitive() { return false; }
+bool SharedFunctionInfoLiteral::IsPrimitive() { return false; }
 bool RegExpLiteral::IsPrimitive() { return false; }
 bool ObjectLiteral::IsPrimitive() { return false; }
 bool ArrayLiteral::IsPrimitive() { return false; }
@@ -518,12 +587,18 @@ bool ThisFunction::IsPrimitive() { return false; }
 
 // The following expression types are not always primitive because we do not
 // have enough information to conclude that they are.
-bool VariableProxy::IsPrimitive() { return false; }
 bool Property::IsPrimitive() { return false; }
 bool Call::IsPrimitive() { return false; }
 bool CallRuntime::IsPrimitive() { return false; }
// A variable use is not primitive unless the primitive-type analysis
// determines otherwise.
bool VariableProxy::IsPrimitive() {
ASSERT(!is_primitive_ || (var() != NULL && var()->IsStackAllocated()));
return is_primitive_;
}
 // The value of a conditional is the value of one of the alternatives. It's
 // always primitive if both alternatives are always primitive.
 bool Conditional::IsPrimitive() {
@@ -592,4 +667,580 @@ bool BinaryOperation::IsPrimitive() {
 bool CompareOperation::IsPrimitive() { return true; }
// Overridden IsCritical member functions. IsCritical is true for AST nodes
// whose evaluation is absolutely required (they are never dead) because
// they are externally visible.
// References to global variables or lookup slots are critical because they
// may have getters. All others, including parameters rewritten to explicit
// property references, are not critical.
bool VariableProxy::IsCritical() {
Variable* var = AsVariable();
return var != NULL &&
(var->slot() == NULL || var->slot()->type() == Slot::LOOKUP);
}
// Literals are never critical.
bool Literal::IsCritical() { return false; }
// Property assignments and throwing of reference errors are always
// critical. Assignments to escaping variables are also critical. In
// addition the operation of compound assignments is critical if either of
// its operands is non-primitive (the arithmetic operations all use one of
// ToPrimitive, ToNumber, ToInt32, or ToUint32 on each of their operands).
// In this case, we mark the entire AST node as critical because there is
// no binary operation node to mark.
bool Assignment::IsCritical() {
Variable* var = AssignedVariable();
return var == NULL ||
!var->IsStackAllocated() ||
(is_compound() && (!target()->IsPrimitive() || !value()->IsPrimitive()));
}
// Property references are always critical, because they may have getters.
bool Property::IsCritical() { return true; }
// Calls are always critical.
bool Call::IsCritical() { return true; }
// +,- use ToNumber on the value of their operand.
bool UnaryOperation::IsCritical() {
ASSERT(op() == Token::ADD || op() == Token::SUB);
return !expression()->IsPrimitive();
}
// Count operations targeting properties and reference errors are always
// critical. Count operations on escaping variables are critical. Count
// operations targeting non-primitives are also critical because they use
// ToNumber.
bool CountOperation::IsCritical() {
Variable* var = AssignedVariable();
return var == NULL ||
!var->IsStackAllocated() ||
!expression()->IsPrimitive();
}
// Arithmetic operations all use one of ToPrimitive, ToNumber, ToInt32, or
// ToUint32 on each of their operands.
bool BinaryOperation::IsCritical() {
ASSERT(op() != Token::COMMA);
ASSERT(op() != Token::OR);
ASSERT(op() != Token::AND);
return !left()->IsPrimitive() || !right()->IsPrimitive();
}
// <, >, <=, and >= all use ToPrimitive on both their operands.
bool CompareOperation::IsCritical() {
ASSERT(op() != Token::EQ);
ASSERT(op() != Token::NE);
ASSERT(op() != Token::EQ_STRICT);
ASSERT(op() != Token::NE_STRICT);
ASSERT(op() != Token::INSTANCEOF);
ASSERT(op() != Token::IN);
return !left()->IsPrimitive() || !right()->IsPrimitive();
}
static inline void MarkIfNotLive(Expression* expr, List<AstNode*>* stack) {
if (!expr->is_live()) {
expr->mark_as_live();
stack->Add(expr);
}
}
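Note: MarkIfNotLive and the ProcessNonLiveChildren overloads that follow it form the mark phase of a classic worklist algorithm: critical nodes seed the stack, and each popped node marks the children whose values it needs. The driver that owns the stack is not in this file (it belongs with the dead-code-elimination pass in data-flow.cc); schematically, and with the loop shape assumed rather than quoted from V8:

// Assumed driver shape, not code from this diff.
static void PropagateLiveness(List<AstNode*>* stack,
                              ZoneList<Expression*>* body_definitions,
                              int variable_count) {
  while (!stack->is_empty()) {
    AstNode* node = stack->RemoveLast();
    node->AsExpression()->ProcessNonLiveChildren(
        stack, body_definitions, variable_count);
  }
}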
// Overloaded functions for marking children of live code as live.
void VariableProxy::ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
// A reference to a stack-allocated variable depends on all the
// definitions reaching it.
BitVector* defs = reaching_definitions();
if (defs != NULL) {
ASSERT(var()->IsStackAllocated());
// The first variable_count definitions are the initial parameter and
// local declarations.
for (int i = variable_count; i < defs->length(); i++) {
if (defs->Contains(i)) {
MarkIfNotLive(body_definitions->at(i - variable_count), stack);
}
}
}
}
void Literal::ProcessNonLiveChildren(List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
// Leaf node, no children.
}
void Assignment::ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
Property* prop = target()->AsProperty();
VariableProxy* proxy = target()->AsVariableProxy();
if (prop != NULL) {
if (!prop->key()->IsPropertyName()) MarkIfNotLive(prop->key(), stack);
MarkIfNotLive(prop->obj(), stack);
} else if (proxy == NULL) {
// Must be a reference error.
ASSERT(!target()->IsValidLeftHandSide());
MarkIfNotLive(target(), stack);
} else if (is_compound()) {
// A variable assignment so lhs is an operand to the operation.
MarkIfNotLive(target(), stack);
}
MarkIfNotLive(value(), stack);
}
void Property::ProcessNonLiveChildren(List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
if (!key()->IsPropertyName()) MarkIfNotLive(key(), stack);
MarkIfNotLive(obj(), stack);
}
void Call::ProcessNonLiveChildren(List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
ZoneList<Expression*>* args = arguments();
for (int i = args->length() - 1; i >= 0; i--) {
MarkIfNotLive(args->at(i), stack);
}
MarkIfNotLive(expression(), stack);
}
void UnaryOperation::ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
MarkIfNotLive(expression(), stack);
}
void CountOperation::ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
MarkIfNotLive(expression(), stack);
}
void BinaryOperation::ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
MarkIfNotLive(right(), stack);
MarkIfNotLive(left(), stack);
}
void CompareOperation::ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
MarkIfNotLive(right(), stack);
MarkIfNotLive(left(), stack);
}
// Implementation of a copy visitor. The visitor create a deep copy
// of ast nodes. Nodes that do not require a deep copy are copied
// with the default copy constructor.
AstNode::AstNode(AstNode* other) : num_(kNoNumber) {
// AST node number should be unique. Assert that we only copy AstNodes
// before node numbers are assigned.
ASSERT(other->num_ == kNoNumber);
}
Statement::Statement(Statement* other)
: AstNode(other), statement_pos_(other->statement_pos_) {}
Expression::Expression(Expression* other)
: AstNode(other),
bitfields_(other->bitfields_),
type_(other->type_) {}
BreakableStatement::BreakableStatement(BreakableStatement* other)
: Statement(other), labels_(other->labels_), type_(other->type_) {}
Block::Block(Block* other, ZoneList<Statement*>* statements)
: BreakableStatement(other),
statements_(statements->length()),
is_initializer_block_(other->is_initializer_block_) {
statements_.AddAll(*statements);
}
ExpressionStatement::ExpressionStatement(ExpressionStatement* other,
Expression* expression)
: Statement(other), expression_(expression) {}
IfStatement::IfStatement(IfStatement* other,
Expression* condition,
Statement* then_statement,
Statement* else_statement)
: Statement(other),
condition_(condition),
then_statement_(then_statement),
else_statement_(else_statement) {}
EmptyStatement::EmptyStatement(EmptyStatement* other) : Statement(other) {}
IterationStatement::IterationStatement(IterationStatement* other,
Statement* body)
: BreakableStatement(other), body_(body) {}
ForStatement::ForStatement(ForStatement* other,
Statement* init,
Expression* cond,
Statement* next,
Statement* body)
: IterationStatement(other, body),
init_(init),
cond_(cond),
next_(next),
may_have_function_literal_(other->may_have_function_literal_),
loop_variable_(other->loop_variable_),
peel_this_loop_(other->peel_this_loop_) {}
Assignment::Assignment(Assignment* other,
Expression* target,
Expression* value)
: Expression(other),
op_(other->op_),
target_(target),
value_(value),
pos_(other->pos_),
block_start_(other->block_start_),
block_end_(other->block_end_) {}
Property::Property(Property* other, Expression* obj, Expression* key)
: Expression(other),
obj_(obj),
key_(key),
pos_(other->pos_),
type_(other->type_) {}
Call::Call(Call* other,
Expression* expression,
ZoneList<Expression*>* arguments)
: Expression(other),
expression_(expression),
arguments_(arguments),
pos_(other->pos_) {}
UnaryOperation::UnaryOperation(UnaryOperation* other, Expression* expression)
: Expression(other), op_(other->op_), expression_(expression) {}
BinaryOperation::BinaryOperation(BinaryOperation* other,
Expression* left,
Expression* right)
: Expression(other),
op_(other->op_),
left_(left),
right_(right) {}
CountOperation::CountOperation(CountOperation* other, Expression* expression)
: Expression(other),
is_prefix_(other->is_prefix_),
op_(other->op_),
expression_(expression) {}
CompareOperation::CompareOperation(CompareOperation* other,
Expression* left,
Expression* right)
: Expression(other),
op_(other->op_),
left_(left),
right_(right) {}
Expression* CopyAstVisitor::DeepCopyExpr(Expression* expr) {
expr_ = NULL;
if (expr != NULL) Visit(expr);
return expr_;
}
Statement* CopyAstVisitor::DeepCopyStmt(Statement* stmt) {
stmt_ = NULL;
if (stmt != NULL) Visit(stmt);
return stmt_;
}
ZoneList<Expression*>* CopyAstVisitor::DeepCopyExprList(
ZoneList<Expression*>* expressions) {
ZoneList<Expression*>* copy =
new ZoneList<Expression*>(expressions->length());
for (int i = 0; i < expressions->length(); i++) {
copy->Add(DeepCopyExpr(expressions->at(i)));
}
return copy;
}
ZoneList<Statement*>* CopyAstVisitor::DeepCopyStmtList(
ZoneList<Statement*>* statements) {
ZoneList<Statement*>* copy = new ZoneList<Statement*>(statements->length());
for (int i = 0; i < statements->length(); i++) {
copy->Add(DeepCopyStmt(statements->at(i)));
}
return copy;
}
void CopyAstVisitor::VisitBlock(Block* stmt) {
stmt_ = new Block(stmt,
DeepCopyStmtList(stmt->statements()));
}
void CopyAstVisitor::VisitExpressionStatement(
ExpressionStatement* stmt) {
stmt_ = new ExpressionStatement(stmt, DeepCopyExpr(stmt->expression()));
}
void CopyAstVisitor::VisitEmptyStatement(EmptyStatement* stmt) {
stmt_ = new EmptyStatement(stmt);
}
void CopyAstVisitor::VisitIfStatement(IfStatement* stmt) {
stmt_ = new IfStatement(stmt,
DeepCopyExpr(stmt->condition()),
DeepCopyStmt(stmt->then_statement()),
DeepCopyStmt(stmt->else_statement()));
}
void CopyAstVisitor::VisitContinueStatement(ContinueStatement* stmt) {
SetStackOverflow();
}
void CopyAstVisitor::VisitBreakStatement(BreakStatement* stmt) {
SetStackOverflow();
}
void CopyAstVisitor::VisitReturnStatement(ReturnStatement* stmt) {
SetStackOverflow();
}
void CopyAstVisitor::VisitWithEnterStatement(
WithEnterStatement* stmt) {
SetStackOverflow();
}
void CopyAstVisitor::VisitWithExitStatement(WithExitStatement* stmt) {
SetStackOverflow();
}
void CopyAstVisitor::VisitSwitchStatement(SwitchStatement* stmt) {
SetStackOverflow();
}
void CopyAstVisitor::VisitDoWhileStatement(DoWhileStatement* stmt) {
SetStackOverflow();
}
void CopyAstVisitor::VisitWhileStatement(WhileStatement* stmt) {
SetStackOverflow();
}
void CopyAstVisitor::VisitForStatement(ForStatement* stmt) {
stmt_ = new ForStatement(stmt,
DeepCopyStmt(stmt->init()),
DeepCopyExpr(stmt->cond()),
DeepCopyStmt(stmt->next()),
DeepCopyStmt(stmt->body()));
}
void CopyAstVisitor::VisitForInStatement(ForInStatement* stmt) {
SetStackOverflow();
}
void CopyAstVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) {
SetStackOverflow();
}
void CopyAstVisitor::VisitTryFinallyStatement(
TryFinallyStatement* stmt) {
SetStackOverflow();
}
void CopyAstVisitor::VisitDebuggerStatement(
DebuggerStatement* stmt) {
SetStackOverflow();
}
void CopyAstVisitor::VisitFunctionLiteral(FunctionLiteral* expr) {
SetStackOverflow();
}
void CopyAstVisitor::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
SetStackOverflow();
}
void CopyAstVisitor::VisitConditional(Conditional* expr) {
SetStackOverflow();
}
void CopyAstVisitor::VisitSlot(Slot* expr) {
UNREACHABLE();
}
void CopyAstVisitor::VisitVariableProxy(VariableProxy* expr) {
expr_ = new VariableProxy(*expr);
}
void CopyAstVisitor::VisitLiteral(Literal* expr) {
expr_ = new Literal(*expr);
}
void CopyAstVisitor::VisitRegExpLiteral(RegExpLiteral* expr) {
SetStackOverflow();
}
void CopyAstVisitor::VisitObjectLiteral(ObjectLiteral* expr) {
SetStackOverflow();
}
void CopyAstVisitor::VisitArrayLiteral(ArrayLiteral* expr) {
SetStackOverflow();
}
void CopyAstVisitor::VisitCatchExtensionObject(
CatchExtensionObject* expr) {
SetStackOverflow();
}
void CopyAstVisitor::VisitAssignment(Assignment* expr) {
expr_ = new Assignment(expr,
DeepCopyExpr(expr->target()),
DeepCopyExpr(expr->value()));
}
void CopyAstVisitor::VisitThrow(Throw* expr) {
SetStackOverflow();
}
void CopyAstVisitor::VisitProperty(Property* expr) {
expr_ = new Property(expr,
DeepCopyExpr(expr->obj()),
DeepCopyExpr(expr->key()));
}
void CopyAstVisitor::VisitCall(Call* expr) {
expr_ = new Call(expr,
DeepCopyExpr(expr->expression()),
DeepCopyExprList(expr->arguments()));
}
void CopyAstVisitor::VisitCallNew(CallNew* expr) {
SetStackOverflow();
}
void CopyAstVisitor::VisitCallRuntime(CallRuntime* expr) {
SetStackOverflow();
}
void CopyAstVisitor::VisitUnaryOperation(UnaryOperation* expr) {
expr_ = new UnaryOperation(expr, DeepCopyExpr(expr->expression()));
}
void CopyAstVisitor::VisitCountOperation(CountOperation* expr) {
expr_ = new CountOperation(expr,
DeepCopyExpr(expr->expression()));
}
void CopyAstVisitor::VisitBinaryOperation(BinaryOperation* expr) {
expr_ = new BinaryOperation(expr,
DeepCopyExpr(expr->left()),
DeepCopyExpr(expr->right()));
}
void CopyAstVisitor::VisitCompareOperation(CompareOperation* expr) {
expr_ = new CompareOperation(expr,
DeepCopyExpr(expr->left()),
DeepCopyExpr(expr->right()));
}
void CopyAstVisitor::VisitThisFunction(ThisFunction* expr) {
SetStackOverflow();
}
void CopyAstVisitor::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
}
} }  // namespace v8::internal
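Note: CopyAstVisitor uses the result-slot convention common to V8's visitors: the Visit* methods return void, so DeepCopyExpr/DeepCopyStmt stash the freshly built node in the expr_/stmt_ member and read it back after Visit returns, while node kinds the copier does not support call SetStackOverflow() to abort the whole copy. A stripped-down illustration of the pattern (hypothetical types, not the V8 classes):

// Hypothetical node/visitor pair illustrating the result-slot pattern.
struct Copier;
struct Node {
  virtual ~Node() {}
  virtual void Accept(Copier* v) = 0;  // Visit-style: returns void
};

struct Copier {
  Node* result = nullptr;  // plays the role of expr_ / stmt_
  bool failed = false;     // set by Bail(), like SetStackOverflow()
  Node* DeepCopy(Node* n) {
    result = nullptr;
    if (n != nullptr) n->Accept(this);  // Accept fills 'result'
    return result;
  }
  void Bail() { failed = true; }
};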

209  deps/v8/src/ast.h

@ -73,7 +73,7 @@ namespace internal {
#define EXPRESSION_NODE_LIST(V) \ #define EXPRESSION_NODE_LIST(V) \
V(FunctionLiteral) \ V(FunctionLiteral) \
V(FunctionBoilerplateLiteral) \ V(SharedFunctionInfoLiteral) \
V(Conditional) \ V(Conditional) \
V(Slot) \ V(Slot) \
V(VariableProxy) \ V(VariableProxy) \
@ -121,11 +121,15 @@ class AstNode: public ZoneObject {
static const int kNoNumber = -1; static const int kNoNumber = -1;
AstNode() : num_(kNoNumber) {} AstNode() : num_(kNoNumber) {}
explicit AstNode(AstNode* other);
virtual ~AstNode() { } virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0; virtual void Accept(AstVisitor* v) = 0;
// Type testing & conversion. // Type testing & conversion.
virtual Statement* AsStatement() { return NULL; } virtual Statement* AsStatement() { return NULL; }
virtual Block* AsBlock() { return NULL; }
virtual ExpressionStatement* AsExpressionStatement() { return NULL; } virtual ExpressionStatement* AsExpressionStatement() { return NULL; }
virtual EmptyStatement* AsEmptyStatement() { return NULL; } virtual EmptyStatement* AsEmptyStatement() { return NULL; }
virtual Expression* AsExpression() { return NULL; } virtual Expression* AsExpression() { return NULL; }
@ -137,6 +141,7 @@ class AstNode: public ZoneObject {
virtual TargetCollector* AsTargetCollector() { return NULL; } virtual TargetCollector* AsTargetCollector() { return NULL; }
virtual BreakableStatement* AsBreakableStatement() { return NULL; } virtual BreakableStatement* AsBreakableStatement() { return NULL; }
virtual IterationStatement* AsIterationStatement() { return NULL; } virtual IterationStatement* AsIterationStatement() { return NULL; }
virtual ForStatement* AsForStatement() { return NULL; }
virtual UnaryOperation* AsUnaryOperation() { return NULL; } virtual UnaryOperation* AsUnaryOperation() { return NULL; }
virtual CountOperation* AsCountOperation() { return NULL; } virtual CountOperation* AsCountOperation() { return NULL; }
virtual BinaryOperation* AsBinaryOperation() { return NULL; } virtual BinaryOperation* AsBinaryOperation() { return NULL; }
@ -147,6 +152,13 @@ class AstNode: public ZoneObject {
virtual ArrayLiteral* AsArrayLiteral() { return NULL; } virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
virtual CompareOperation* AsCompareOperation() { return NULL; } virtual CompareOperation* AsCompareOperation() { return NULL; }
// True if the AST node is critical (its execution is needed or externally
// visible in some way).
virtual bool IsCritical() {
UNREACHABLE();
return true;
}
int num() { return num_; } int num() { return num_; }
void set_num(int n) { num_ = n; } void set_num(int n) { num_ = n; }
@ -160,6 +172,8 @@ class Statement: public AstNode {
public: public:
Statement() : statement_pos_(RelocInfo::kNoPosition) {} Statement() : statement_pos_(RelocInfo::kNoPosition) {}
explicit Statement(Statement* other);
virtual Statement* AsStatement() { return this; } virtual Statement* AsStatement() { return this; }
virtual ReturnStatement* AsReturnStatement() { return NULL; } virtual ReturnStatement* AsReturnStatement() { return NULL; }
@ -198,11 +212,13 @@ class Expression: public AstNode {
Expression() : bitfields_(0) {} Expression() : bitfields_(0) {}
explicit Expression(Expression* other);
virtual Expression* AsExpression() { return this; } virtual Expression* AsExpression() { return this; }
virtual bool IsValidLeftHandSide() { return false; } virtual bool IsValidLeftHandSide() { return false; }
virtual Variable* AssignedVar() { return NULL; } virtual Variable* AssignedVariable() { return NULL; }
// Symbols that cannot be parsed as array indices are considered property // Symbols that cannot be parsed as array indices are considered property
// names. We do not treat symbols that can be array indexes as property // names. We do not treat symbols that can be array indexes as property
@ -230,6 +246,19 @@ class Expression: public AstNode {
// Static type information for this expression. // Static type information for this expression.
StaticType* type() { return &type_; } StaticType* type() { return &type_; }
// True if the expression is a loop condition.
bool is_loop_condition() const {
return LoopConditionField::decode(bitfields_);
}
void set_is_loop_condition(bool flag) {
bitfields_ = (bitfields_ & ~LoopConditionField::mask()) |
LoopConditionField::encode(flag);
}
// The value of the expression is guaranteed to be a smi, because the
// top operation is a bit operation with a mask, or a shift.
bool GuaranteedSmiResult();
// AST analysis results // AST analysis results
// True if the expression rooted at this node can be compiled by the // True if the expression rooted at this node can be compiled by the
@ -265,6 +294,18 @@ class Expression: public AstNode {
bitfields_ |= NumBitOpsField::encode(num_bit_ops); bitfields_ |= NumBitOpsField::encode(num_bit_ops);
} }
// Functions used for dead-code elimination. Predicate is true if the
// expression is not dead code.
int is_live() const { return LiveField::decode(bitfields_); }
void mark_as_live() { bitfields_ |= LiveField::encode(true); }
// Mark non-live children as live and push them on a stack for further
// processing.
virtual void ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
}
private: private:
static const int kMaxNumBitOps = (1 << 5) - 1; static const int kMaxNumBitOps = (1 << 5) - 1;
@ -277,6 +318,8 @@ class Expression: public AstNode {
class NoNegativeZeroField : public BitField<bool, 1, 1> {}; class NoNegativeZeroField : public BitField<bool, 1, 1> {};
class ToInt32Field : public BitField<bool, 2, 1> {}; class ToInt32Field : public BitField<bool, 2, 1> {};
class NumBitOpsField : public BitField<int, 3, 5> {}; class NumBitOpsField : public BitField<int, 3, 5> {};
class LoopConditionField: public BitField<bool, 8, 1> {};
class LiveField: public BitField<bool, 9, 1> {};
}; };
@ -327,6 +370,8 @@ class BreakableStatement: public Statement {
ASSERT(labels == NULL || labels->length() > 0); ASSERT(labels == NULL || labels->length() > 0);
} }
explicit BreakableStatement(BreakableStatement* other);
private: private:
ZoneStringList* labels_; ZoneStringList* labels_;
Type type_; Type type_;
@ -341,8 +386,14 @@ class Block: public BreakableStatement {
statements_(capacity), statements_(capacity),
is_initializer_block_(is_initializer_block) { } is_initializer_block_(is_initializer_block) { }
// Construct a clone initialized from the original block and
// a deep copy of all statements of the original block.
Block(Block* other, ZoneList<Statement*>* statements);
virtual void Accept(AstVisitor* v); virtual void Accept(AstVisitor* v);
virtual Block* AsBlock() { return this; }
virtual Assignment* StatementAsSimpleAssignment() { virtual Assignment* StatementAsSimpleAssignment() {
if (statements_.length() != 1) return NULL; if (statements_.length() != 1) return NULL;
return statements_[0]->StatementAsSimpleAssignment(); return statements_[0]->StatementAsSimpleAssignment();
@ -394,6 +445,7 @@ class IterationStatement: public BreakableStatement {
virtual IterationStatement* AsIterationStatement() { return this; } virtual IterationStatement* AsIterationStatement() { return this; }
Statement* body() const { return body_; } Statement* body() const { return body_; }
void set_body(Statement* stmt) { body_ = stmt; }
// Code generation // Code generation
BreakTarget* continue_target() { return &continue_target_; } BreakTarget* continue_target() { return &continue_target_; }
@ -402,6 +454,10 @@ class IterationStatement: public BreakableStatement {
explicit IterationStatement(ZoneStringList* labels) explicit IterationStatement(ZoneStringList* labels)
: BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { } : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { }
// Construct a clone initialized from original and
// a deep copy of the original body.
IterationStatement(IterationStatement* other, Statement* body);
void Initialize(Statement* body) { void Initialize(Statement* body) {
body_ = body; body_ = body;
} }
@ -475,7 +531,18 @@ class ForStatement: public IterationStatement {
cond_(NULL), cond_(NULL),
next_(NULL), next_(NULL),
may_have_function_literal_(true), may_have_function_literal_(true),
loop_variable_(NULL) {} loop_variable_(NULL),
peel_this_loop_(false) {}
// Construct a for-statement initialized from another for-statement
// and deep copies of all parts of the original statement.
ForStatement(ForStatement* other,
Statement* init,
Expression* cond,
Statement* next,
Statement* body);
virtual ForStatement* AsForStatement() { return this; }
void Initialize(Statement* init, void Initialize(Statement* init,
Expression* cond, Expression* cond,
@ -490,8 +557,11 @@ class ForStatement: public IterationStatement {
virtual void Accept(AstVisitor* v); virtual void Accept(AstVisitor* v);
Statement* init() const { return init_; } Statement* init() const { return init_; }
void set_init(Statement* stmt) { init_ = stmt; }
Expression* cond() const { return cond_; } Expression* cond() const { return cond_; }
void set_cond(Expression* expr) { cond_ = expr; }
Statement* next() const { return next_; } Statement* next() const { return next_; }
void set_next(Statement* stmt) { next_ = stmt; }
bool may_have_function_literal() const { bool may_have_function_literal() const {
return may_have_function_literal_; return may_have_function_literal_;
} }
@ -500,6 +570,9 @@ class ForStatement: public IterationStatement {
Variable* loop_variable() { return loop_variable_; } Variable* loop_variable() { return loop_variable_; }
void set_loop_variable(Variable* var) { loop_variable_ = var; } void set_loop_variable(Variable* var) { loop_variable_ = var; }
bool peel_this_loop() { return peel_this_loop_; }
void set_peel_this_loop(bool b) { peel_this_loop_ = b; }
private: private:
Statement* init_; Statement* init_;
Expression* cond_; Expression* cond_;
@ -507,6 +580,7 @@ class ForStatement: public IterationStatement {
// True if there is a function literal subexpression in the condition. // True if there is a function literal subexpression in the condition.
bool may_have_function_literal_; bool may_have_function_literal_;
Variable* loop_variable_; Variable* loop_variable_;
bool peel_this_loop_;
friend class AstOptimizer; friend class AstOptimizer;
}; };
@ -539,6 +613,10 @@ class ExpressionStatement: public Statement {
explicit ExpressionStatement(Expression* expression) explicit ExpressionStatement(Expression* expression)
: expression_(expression) { } : expression_(expression) { }
// Construct an expression statement initialized from another
// expression statement and a deep copy of the original expression.
ExpressionStatement(ExpressionStatement* other, Expression* expression);
virtual void Accept(AstVisitor* v); virtual void Accept(AstVisitor* v);
// Type testing & conversion. // Type testing & conversion.
@ -681,6 +759,13 @@ class IfStatement: public Statement {
then_statement_(then_statement), then_statement_(then_statement),
else_statement_(else_statement) { } else_statement_(else_statement) { }
// Construct an if-statement initialized from another if-statement
// and deep copies of all parts of the original.
IfStatement(IfStatement* other,
Expression* condition,
Statement* then_statement,
Statement* else_statement);
virtual void Accept(AstVisitor* v); virtual void Accept(AstVisitor* v);
bool HasThenStatement() const { return !then_statement()->IsEmpty(); } bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
@ -688,7 +773,9 @@ class IfStatement: public Statement {
Expression* condition() const { return condition_; } Expression* condition() const { return condition_; }
Statement* then_statement() const { return then_statement_; } Statement* then_statement() const { return then_statement_; }
void set_then_statement(Statement* stmt) { then_statement_ = stmt; }
Statement* else_statement() const { return else_statement_; } Statement* else_statement() const { return else_statement_; }
void set_else_statement(Statement* stmt) { else_statement_ = stmt; }
private: private:
Expression* condition_; Expression* condition_;
@ -783,6 +870,10 @@ class DebuggerStatement: public Statement {
class EmptyStatement: public Statement { class EmptyStatement: public Statement {
public: public:
EmptyStatement() {}
explicit EmptyStatement(EmptyStatement* other);
virtual void Accept(AstVisitor* v); virtual void Accept(AstVisitor* v);
// Type testing & conversion. // Type testing & conversion.
@ -815,6 +906,11 @@ class Literal: public Expression {
virtual bool IsLeaf() { return true; } virtual bool IsLeaf() { return true; }
virtual bool IsTrivial() { return true; } virtual bool IsTrivial() { return true; }
virtual bool IsPrimitive(); virtual bool IsPrimitive();
virtual bool IsCritical();
virtual void ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
// Identity testers. // Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); } bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
@ -1021,6 +1117,13 @@ class VariableProxy: public Expression {
virtual bool IsTrivial() { return is_trivial_; } virtual bool IsTrivial() { return is_trivial_; }
virtual bool IsPrimitive(); virtual bool IsPrimitive();
virtual bool IsCritical();
virtual void ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
void SetIsPrimitive(bool value) { is_primitive_ = value; }
bool IsVariable(Handle<String> n) { bool IsVariable(Handle<String> n) {
return !is_this() && name().is_identical_to(n); return !is_this() && name().is_identical_to(n);
@ -1051,6 +1154,7 @@ class VariableProxy: public Expression {
bool inside_with_; bool inside_with_;
bool is_trivial_; bool is_trivial_;
BitVector* reaching_definitions_; BitVector* reaching_definitions_;
bool is_primitive_;
VariableProxy(Handle<String> name, bool is_this, bool inside_with); VariableProxy(Handle<String> name, bool is_this, bool inside_with);
explicit VariableProxy(bool is_this); explicit VariableProxy(bool is_this);
@ -1145,6 +1249,8 @@ class Property: public Expression {
Property(Expression* obj, Expression* key, int pos, Type type = NORMAL) Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
: obj_(obj), key_(key), pos_(pos), type_(type) { } : obj_(obj), key_(key), pos_(pos), type_(type) { }
Property(Property* other, Expression* obj, Expression* key);
virtual void Accept(AstVisitor* v); virtual void Accept(AstVisitor* v);
// Type testing & conversion // Type testing & conversion
@ -1153,6 +1259,11 @@ class Property: public Expression {
virtual bool IsValidLeftHandSide() { return true; } virtual bool IsValidLeftHandSide() { return true; }
virtual bool IsPrimitive(); virtual bool IsPrimitive();
virtual bool IsCritical();
virtual void ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
Expression* obj() const { return obj_; } Expression* obj() const { return obj_; }
Expression* key() const { return key_; } Expression* key() const { return key_; }
@@ -1179,12 +1290,19 @@ class Call: public Expression {
   Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
       : expression_(expression), arguments_(arguments), pos_(pos) { }

+  Call(Call* other, Expression* expression, ZoneList<Expression*>* arguments);
+
   virtual void Accept(AstVisitor* v);

   // Type testing and conversion.
   virtual Call* AsCall() { return this; }

   virtual bool IsPrimitive();
+  virtual bool IsCritical();
+  virtual void ProcessNonLiveChildren(
+      List<AstNode*>* stack,
+      ZoneList<Expression*>* body_definitions,
+      int variable_count);

   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1255,12 +1373,19 @@ class UnaryOperation: public Expression {
     ASSERT(Token::IsUnaryOp(op));
   }

+  UnaryOperation(UnaryOperation* other, Expression* expression);
+
   virtual void Accept(AstVisitor* v);

   // Type testing & conversion
   virtual UnaryOperation* AsUnaryOperation() { return this; }

   virtual bool IsPrimitive();
+  virtual bool IsCritical();
+  virtual void ProcessNonLiveChildren(
+      List<AstNode*>* stack,
+      ZoneList<Expression*>* body_definitions,
+      int variable_count);

   Token::Value op() const { return op_; }
   Expression* expression() const { return expression_; }
@@ -1278,12 +1403,19 @@ class BinaryOperation: public Expression {
     ASSERT(Token::IsBinaryOp(op));
   }

+  BinaryOperation(BinaryOperation* other, Expression* left, Expression* right);
+
   virtual void Accept(AstVisitor* v);

   // Type testing & conversion
   virtual BinaryOperation* AsBinaryOperation() { return this; }

   virtual bool IsPrimitive();
+  virtual bool IsCritical();
+  virtual void ProcessNonLiveChildren(
+      List<AstNode*>* stack,
+      ZoneList<Expression*>* body_definitions,
+      int variable_count);

   // True iff the result can be safely overwritten (to avoid allocation).
   // False for operations that can return one of their operands.
@@ -1329,15 +1461,22 @@ class CountOperation: public Expression {
     ASSERT(Token::IsCountOp(op));
   }

+  CountOperation(CountOperation* other, Expression* expression);
+
   virtual void Accept(AstVisitor* v);

   virtual CountOperation* AsCountOperation() { return this; }

-  virtual Variable* AssignedVar() {
+  virtual Variable* AssignedVariable() {
     return expression()->AsVariableProxy()->AsVariable();
   }

   virtual bool IsPrimitive();
+  virtual bool IsCritical();
+  virtual void ProcessNonLiveChildren(
+      List<AstNode*>* stack,
+      ZoneList<Expression*>* body_definitions,
+      int variable_count);

   bool is_prefix() const { return is_prefix_; }
   bool is_postfix() const { return !is_prefix_; }
@@ -1359,22 +1498,27 @@ class CountOperation: public Expression {
 class CompareOperation: public Expression {
  public:
   CompareOperation(Token::Value op, Expression* left, Expression* right)
-      : op_(op), left_(left), right_(right), is_for_loop_condition_(false) {
+      : op_(op), left_(left), right_(right) {
     ASSERT(Token::IsCompareOp(op));
   }

+  CompareOperation(CompareOperation* other,
+                   Expression* left,
+                   Expression* right);
+
   virtual void Accept(AstVisitor* v);

   virtual bool IsPrimitive();
+  virtual bool IsCritical();
+  virtual void ProcessNonLiveChildren(
+      List<AstNode*>* stack,
+      ZoneList<Expression*>* body_definitions,
+      int variable_count);

   Token::Value op() const { return op_; }
   Expression* left() const { return left_; }
   Expression* right() const { return right_; }

-  // Accessors for flag whether this compare operation is hanging of a for loop.
-  bool is_for_loop_condition() const { return is_for_loop_condition_; }
-  void set_is_for_loop_condition() { is_for_loop_condition_ = true; }
-
   // Type testing & conversion
   virtual CompareOperation* AsCompareOperation() { return this; }
@@ -1382,7 +1526,6 @@ class CompareOperation: public Expression {
   Token::Value op_;
   Expression* left_;
   Expression* right_;
-  bool is_for_loop_condition_;
 };
@@ -1418,14 +1561,21 @@ class Assignment: public Expression {
     ASSERT(Token::IsAssignmentOp(op));
   }

+  Assignment(Assignment* other, Expression* target, Expression* value);
+
   virtual void Accept(AstVisitor* v);

   virtual Assignment* AsAssignment() { return this; }

   virtual bool IsPrimitive();
+  virtual bool IsCritical();
+  virtual void ProcessNonLiveChildren(
+      List<AstNode*>* stack,
+      ZoneList<Expression*>* body_definitions,
+      int variable_count);

   Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }

-  virtual Variable* AssignedVar() {
+  virtual Variable* AssignedVariable() {
     return target()->AsVariableProxy()->AsVariable();
   }
@@ -1574,14 +1724,15 @@ class FunctionLiteral: public Expression {
 };

-class FunctionBoilerplateLiteral: public Expression {
+class SharedFunctionInfoLiteral: public Expression {
  public:
-  explicit FunctionBoilerplateLiteral(Handle<JSFunction> boilerplate)
-      : boilerplate_(boilerplate) {
-    ASSERT(boilerplate->IsBoilerplate());
-  }
+  explicit SharedFunctionInfoLiteral(
+      Handle<SharedFunctionInfo> shared_function_info)
+      : shared_function_info_(shared_function_info) { }

-  Handle<JSFunction> boilerplate() const { return boilerplate_; }
+  Handle<SharedFunctionInfo> shared_function_info() const {
+    return shared_function_info_;
+  }

   virtual bool IsLeaf() { return true; }
@@ -1590,7 +1741,7 @@ class FunctionBoilerplateLiteral: public Expression {
   virtual bool IsPrimitive();

  private:
-  Handle<JSFunction> boilerplate_;
+  Handle<SharedFunctionInfo> shared_function_info_;
 };
@@ -1993,6 +2144,28 @@ class AstVisitor BASE_EMBEDDED {
 };

+class CopyAstVisitor : public AstVisitor {
+ public:
+  Expression* DeepCopyExpr(Expression* expr);
+
+  Statement* DeepCopyStmt(Statement* stmt);
+
+ private:
+  ZoneList<Expression*>* DeepCopyExprList(ZoneList<Expression*>* expressions);
+
+  ZoneList<Statement*>* DeepCopyStmtList(ZoneList<Statement*>* statements);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  // Holds the result of copying an expression.
+  Expression* expr_;
+  // Holds the result of copying a statement.
+  Statement* stmt_;
+};
+
 } }  // namespace v8::internal

 #endif  // V8_AST_H_
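The CopyAstVisitor added at the end of ast.h deep-copies trees through the regular visitor interface; because the Visit##type methods return void, the freshly built copy is parked in the expr_/stmt_ result slots and picked up by DeepCopyExpr/DeepCopyStmt. A compact, self-contained illustration of that result-slot pattern with hypothetical node types (not V8's classes):

    #include <cstdio>

    struct Visitor;

    struct Node {
      virtual ~Node() {}
      virtual void Accept(Visitor* v) = 0;
    };

    struct Num;
    struct Add;

    struct Visitor {
      virtual ~Visitor() {}
      virtual void VisitNum(Num* n) = 0;
      virtual void VisitAdd(Add* n) = 0;
    };

    struct Num : Node {
      int value;
      explicit Num(int v) : value(v) {}
      void Accept(Visitor* v) { v->VisitNum(this); }
    };

    struct Add : Node {
      Node* left;
      Node* right;
      Add(Node* l, Node* r) : left(l), right(r) {}
      void Accept(Visitor* v) { v->VisitAdd(this); }
    };

    class CopyVisitor : public Visitor {
     public:
      // Dispatch through Accept; the visit method leaves the copy in result_.
      Node* DeepCopy(Node* node) {
        node->Accept(this);
        return result_;
      }

     private:
      void VisitNum(Num* n) { result_ = new Num(n->value); }
      void VisitAdd(Add* n) {
        Node* l = DeepCopy(n->left);   // recursion reuses the same slot,
        Node* r = DeepCopy(n->right);  // so copy children before the parent
        result_ = new Add(l, r);
      }

      Node* result_;  // holds the result of copying a node
    };

    int main() {
      Num one(1), two(2);
      Add sum(&one, &two);
      CopyVisitor copier;
      Node* copy = copier.DeepCopy(&sum);
      std::printf("fresh tree: %d\n", copy != (Node*)&sum);  // prints 1
      return 0;
    }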

669
deps/v8/src/bootstrapper.cc

@@ -59,11 +59,12 @@ class SourceCodeCache BASE_EMBEDDED {
   }

-  bool Lookup(Vector<const char> name, Handle<JSFunction>* handle) {
+  bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
     for (int i = 0; i < cache_->length(); i+=2) {
       SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
       if (str->IsEqualTo(name)) {
-        *handle = Handle<JSFunction>(JSFunction::cast(cache_->get(i + 1)));
+        *handle = Handle<SharedFunctionInfo>(
+            SharedFunctionInfo::cast(cache_->get(i + 1)));
         return true;
       }
     }
@@ -71,8 +72,7 @@ class SourceCodeCache BASE_EMBEDDED {
   }

-  void Add(Vector<const char> name, Handle<JSFunction> fun) {
-    ASSERT(fun->IsBoilerplate());
+  void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
     HandleScope scope;
     int length = cache_->length();
     Handle<FixedArray> new_array =
@@ -81,8 +81,8 @@ class SourceCodeCache BASE_EMBEDDED {
     cache_ = *new_array;
     Handle<String> str = Factory::NewStringFromAscii(name, TENURED);
     cache_->set(length, *str);
-    cache_->set(length + 1, *fun);
-    Script::cast(fun->shared()->script())->set_type(Smi::FromInt(type_));
+    cache_->set(length + 1, *shared);
+    Script::cast(shared->script())->set_type(Smi::FromInt(type_));
   }

  private:
@@ -91,7 +91,6 @@ class SourceCodeCache BASE_EMBEDDED {
   DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
 };

-static SourceCodeCache natives_cache(Script::TYPE_NATIVE);
 static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
 // This is for delete, not delete[].
 static List<char*>* delete_these_non_arrays_on_tear_down = NULL;
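SourceCodeCache keeps its entries in one flat FixedArray, names at even slots and the matching entry (now a SharedFunctionInfo rather than a boilerplate JSFunction) at odd slots, scanned linearly; with only a handful of natives and extensions that is cheaper than a real hash table. The same layout in a self-contained sketch, where std::string stands in for both the symbol and the compiled entry:

    #include <string>
    #include <vector>

    class FlatCache {
     public:
      // Linear scan over the name slots (even indices), mirroring
      // SourceCodeCache::Lookup's i += 2 walk.
      bool Lookup(const std::string& name, std::string* payload) const {
        for (size_t i = 0; i + 1 < slots_.size(); i += 2) {
          if (slots_[i] == name) {
            *payload = slots_[i + 1];
            return true;
          }
        }
        return false;
      }

      // Append a (name, payload) pair, growing the flat array by two slots.
      void Add(const std::string& name, const std::string& payload) {
        slots_.push_back(name);
        slots_.push_back(payload);
      }

     private:
      std::vector<std::string> slots_;  // name, payload, name, payload, ...
    };

    int main() {
      FlatCache cache;
      cache.Add("json", "compiled json natives");
      std::string hit;
      return cache.Lookup("json", &hit) ? 0 : 1;
    }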
@@ -134,20 +133,7 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
 }

-bool Bootstrapper::NativesCacheLookup(Vector<const char> name,
-                                      Handle<JSFunction>* handle) {
-  return natives_cache.Lookup(name, handle);
-}
-
-
-void Bootstrapper::NativesCacheAdd(Vector<const char> name,
-                                   Handle<JSFunction> fun) {
-  natives_cache.Add(name, fun);
-}
-
-
 void Bootstrapper::Initialize(bool create_heap_objects) {
-  natives_cache.Initialize(create_heap_objects);
   extensions_cache.Initialize(create_heap_objects);
 }
@@ -187,8 +173,7 @@ void Bootstrapper::TearDown() {
     delete_these_arrays_on_tear_down = NULL;
   }

-  natives_cache.Initialize(false);  // Yes, symmetrical
-  extensions_cache.Initialize(false);
+  extensions_cache.Initialize(false);  // Yes, symmetrical
 }
@@ -197,17 +182,11 @@ class Genesis BASE_EMBEDDED {
   Genesis(Handle<Object> global_object,
           v8::Handle<v8::ObjectTemplate> global_template,
           v8::ExtensionConfiguration* extensions);
-  ~Genesis();
+  ~Genesis() { }

   Handle<Context> result() { return result_; }

   Genesis* previous() { return previous_; }
-  static Genesis* current() { return current_; }
-
-  // Support for thread preemption.
-  static int ArchiveSpacePerThread();
-  static char* ArchiveState(char* to);
-  static char* RestoreState(char* from);

  private:
   Handle<Context> global_context_;
@@ -216,18 +195,46 @@ class Genesis BASE_EMBEDDED {
   // triggered during environment creation there may be weak handle
   // processing callbacks which may create new environments.
   Genesis* previous_;
-  static Genesis* current_;

   Handle<Context> global_context() { return global_context_; }

-  void CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
-                   Handle<Object> global_object);
+  // Creates some basic objects. Used for creating a context from scratch.
+  void CreateRoots();
+  // Creates the empty function. Used for creating a context from scratch.
+  Handle<JSFunction> CreateEmptyFunction();
+  // Creates the global objects using the global and the template passed in
+  // through the API. We call this regardless of whether we are building a
+  // context from scratch or using a deserialized one from the partial snapshot
+  // but in the latter case we don't use the objects it produces directly, as
+  // we have to used the deserialized ones that are linked together with the
+  // rest of the context snapshot.
+  Handle<JSGlobalProxy> CreateNewGlobals(
+      v8::Handle<v8::ObjectTemplate> global_template,
+      Handle<Object> global_object,
+      Handle<GlobalObject>* global_proxy_out);
+  // Hooks the given global proxy into the context. If the context was created
+  // by deserialization then this will unhook the global proxy that was
+  // deserialized, leaving the GC to pick it up.
+  void HookUpGlobalProxy(Handle<GlobalObject> inner_global,
+                         Handle<JSGlobalProxy> global_proxy);
+  // Similarly, we want to use the inner global that has been created by the
+  // templates passed through the API. The inner global from the snapshot is
+  // detached from the other objects in the snapshot.
+  void HookUpInnerGlobal(Handle<GlobalObject> inner_global);
+  // New context initialization. Used for creating a context from scratch.
+  void InitializeGlobal(Handle<GlobalObject> inner_global,
+                        Handle<JSFunction> empty_function);
+  // Installs the contents of the native .js files on the global objects.
+  // Used for creating a context from scratch.
   void InstallNativeFunctions();
   bool InstallNatives();
-  bool InstallExtensions(v8::ExtensionConfiguration* extensions);
-  bool InstallExtension(const char* name);
-  bool InstallExtension(v8::RegisteredExtension* current);
-  bool InstallSpecialObjects();
+  // Used both for deserialized and from-scratch contexts to add the extensions
+  // provided.
+  static bool InstallExtensions(Handle<Context> global_context,
+                                v8::ExtensionConfiguration* extensions);
+  static bool InstallExtension(const char* name);
+  static bool InstallExtension(v8::RegisteredExtension* current);
+  static void InstallSpecialObjects(Handle<Context> global_context);
   bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
   bool ConfigureApiObject(Handle<JSObject> object,
                           Handle<ObjectTemplateInfo> object_template);
@@ -251,33 +258,36 @@ class Genesis BASE_EMBEDDED {
                            Handle<String> source,
                            SourceCodeCache* cache,
                            v8::Extension* extension,
+                           Handle<Context> top_context,
                            bool use_runtime_context);

   Handle<Context> result_;
+  Handle<JSFunction> empty_function_;
+  BootstrapperActive active_;
+  friend class Bootstrapper;
 };

-Genesis* Genesis::current_ = NULL;
-
-
 void Bootstrapper::Iterate(ObjectVisitor* v) {
-  natives_cache.Iterate(v);
-  v->Synchronize("NativesCache");
   extensions_cache.Iterate(v);
   v->Synchronize("Extensions");
 }

-bool Bootstrapper::IsActive() {
-  return Genesis::current() != NULL;
-}
-
-
 Handle<Context> Bootstrapper::CreateEnvironment(
     Handle<Object> global_object,
     v8::Handle<v8::ObjectTemplate> global_template,
     v8::ExtensionConfiguration* extensions) {
+  HandleScope scope;
+  Handle<Context> env;
   Genesis genesis(global_object, global_template, extensions);
-  return genesis.result();
+  env = genesis.result();
+  if (!env.is_null()) {
+    if (InstallExtensions(env, extensions)) {
+      return env;
+    }
+  }
+  return Handle<Context>();
 }
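CreateEnvironment is now two-phase: Genesis produces a context (from the snapshot or from scratch) and only then are the configured extensions installed into it, with failure at either step surfacing as a single null handle. A rough model of that control flow, with stand-in types for Handle<Context> and the extension list (none of this is V8's API):

    #include <memory>

    struct Context { bool extensions_installed = false; };
    struct ExtensionConfiguration {};

    // Stand-ins for the two phases; in the real code these are the Genesis
    // constructor and Bootstrapper::InstallExtensions.
    std::unique_ptr<Context> RunGenesis() { return std::make_unique<Context>(); }
    bool InstallExtensions(Context* env, ExtensionConfiguration*) {
      env->extensions_installed = true;
      return true;
    }

    std::unique_ptr<Context> CreateEnvironment(ExtensionConfiguration* ext) {
      std::unique_ptr<Context> env = RunGenesis();  // phase 1: build context
      if (env && InstallExtensions(env.get(), ext)) {
        return env;                                 // phase 2 succeeded
      }
      return nullptr;                               // either phase failed
    }

    int main() {
      ExtensionConfiguration config;
      return CreateEnvironment(&config) ? 0 : 1;
    }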
@@ -299,9 +309,14 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
 }

-Genesis::~Genesis() {
-  ASSERT(current_ == this);
-  current_ = previous_;
+void Bootstrapper::ReattachGlobal(Handle<Context> env,
+                                  Handle<Object> global_object) {
+  ASSERT(global_object->IsJSGlobalProxy());
+  Handle<JSGlobalProxy> global = Handle<JSGlobalProxy>::cast(global_object);
+  env->global()->set_global_receiver(*global);
+  env->set_global_proxy(*global);
+  SetObjectPrototype(global, Handle<JSObject>(env->global()));
+  global->set_context(*env);
 }
@@ -384,22 +399,7 @@ Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
 }

-void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
-                          Handle<Object> global_object) {
-  HandleScope scope;
-
-  // Allocate the global context FixedArray first and then patch the
-  // closure and extension object later (we need the empty function
-  // and the global object, but in order to create those, we need the
-  // global context).
-  global_context_ =
-      Handle<Context>::cast(
-          GlobalHandles::Create(*Factory::NewGlobalContext()));
-  Top::set_context(*global_context());
-
-  // Allocate the message listeners object.
-  v8::NeanderArray listeners;
-  global_context()->set_message_listeners(*listeners.value());
-
+Handle<JSFunction> Genesis::CreateEmptyFunction() {
   // Allocate the map for function instances.
   Handle<Map> fm = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   global_context()->set_function_instance_map(*fm);
@@ -443,137 +443,195 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
   Handle<JSFunction> empty_function =
       Factory::NewFunction(symbol, Factory::null_value());

-  {  // --- E m p t y ---
+  // --- E m p t y ---
   Handle<Code> code =
       Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
   empty_function->set_code(*code);
   Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
   Handle<Script> script = Factory::NewScript(source);
   script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
   empty_function->shared()->set_script(*script);
   empty_function->shared()->set_start_position(0);
   empty_function->shared()->set_end_position(source->length());
   empty_function->shared()->DontAdaptArguments();
   global_context()->function_map()->set_prototype(*empty_function);
   global_context()->function_instance_map()->set_prototype(*empty_function);

-    // Allocate the function map first and then patch the prototype later
-    Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(fm);
-    empty_fm->set_instance_descriptors(*function_map_descriptors);
-    empty_fm->set_prototype(global_context()->object_function()->prototype());
-    empty_function->set_map(*empty_fm);
-  }
-
-  {  // --- G l o b a l ---
-    // Step 1: create a fresh inner JSGlobalObject
-    Handle<GlobalObject> object;
-    {
-      Handle<JSFunction> js_global_function;
-      Handle<ObjectTemplateInfo> js_global_template;
-      if (!global_template.IsEmpty()) {
-        // Get prototype template of the global_template
-        Handle<ObjectTemplateInfo> data =
-            v8::Utils::OpenHandle(*global_template);
-        Handle<FunctionTemplateInfo> global_constructor =
-            Handle<FunctionTemplateInfo>(
-                FunctionTemplateInfo::cast(data->constructor()));
-        Handle<Object> proto_template(global_constructor->prototype_template());
-        if (!proto_template->IsUndefined()) {
-          js_global_template =
-              Handle<ObjectTemplateInfo>::cast(proto_template);
-        }
-      }
-
-      if (js_global_template.is_null()) {
-        Handle<String> name = Handle<String>(Heap::empty_symbol());
-        Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
-        js_global_function =
-            Factory::NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
-                                 JSGlobalObject::kSize, code, true);
-        // Change the constructor property of the prototype of the
-        // hidden global function to refer to the Object function.
-        Handle<JSObject> prototype =
-            Handle<JSObject>(
-                JSObject::cast(js_global_function->instance_prototype()));
-        SetProperty(prototype, Factory::constructor_symbol(),
-                    Top::object_function(), NONE);
-      } else {
-        Handle<FunctionTemplateInfo> js_global_constructor(
-            FunctionTemplateInfo::cast(js_global_template->constructor()));
-        js_global_function =
-            Factory::CreateApiFunction(js_global_constructor,
-                                       Factory::InnerGlobalObject);
-      }
-
-      js_global_function->initial_map()->set_is_hidden_prototype();
-      object = Factory::NewGlobalObject(js_global_function);
-    }
-
-    // Set the global context for the global object.
-    object->set_global_context(*global_context());
-
-    // Step 2: create or re-initialize the global proxy object.
-    Handle<JSGlobalProxy> global_proxy;
-    {
-      Handle<JSFunction> global_proxy_function;
-      if (global_template.IsEmpty()) {
-        Handle<String> name = Handle<String>(Heap::empty_symbol());
-        Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
-        global_proxy_function =
-            Factory::NewFunction(name, JS_GLOBAL_PROXY_TYPE,
-                                 JSGlobalProxy::kSize, code, true);
-      } else {
-        Handle<ObjectTemplateInfo> data =
-            v8::Utils::OpenHandle(*global_template);
-        Handle<FunctionTemplateInfo> global_constructor(
-            FunctionTemplateInfo::cast(data->constructor()));
-        global_proxy_function =
-            Factory::CreateApiFunction(global_constructor,
-                                       Factory::OuterGlobalObject);
-      }
-
-      Handle<String> global_name = Factory::LookupAsciiSymbol("global");
-      global_proxy_function->shared()->set_instance_class_name(*global_name);
-      global_proxy_function->initial_map()->set_is_access_check_needed(true);
-
-      // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
-      if (global_object.location() != NULL) {
-        ASSERT(global_object->IsJSGlobalProxy());
-        global_proxy =
-            ReinitializeJSGlobalProxy(
-                global_proxy_function,
-                Handle<JSGlobalProxy>::cast(global_object));
-      } else {
-        global_proxy = Handle<JSGlobalProxy>::cast(
-            Factory::NewJSObject(global_proxy_function, TENURED));
-      }
-
-      // Security setup: Set the security token of the global object to
-      // its the inner global. This makes the security check between two
-      // different contexts fail by default even in case of global
-      // object reinitialization.
-      object->set_global_receiver(*global_proxy);
-      global_proxy->set_context(*global_context());
-    }
-  }
-
-  {  // --- G l o b a l  C o n t e x t ---
-    // use the empty function as closure (no scope info)
-    global_context()->set_closure(*empty_function);
-    global_context()->set_fcontext(*global_context());
-    global_context()->set_previous(NULL);
-
-    // set extension and global object
-    global_context()->set_extension(*object);
-    global_context()->set_global(*object);
-    global_context()->set_global_proxy(*global_proxy);
-    // use inner global object as security token by default
-    global_context()->set_security_token(*object);
-  }
-
-  Handle<JSObject> global = Handle<JSObject>(global_context()->global());
-  SetProperty(global, object_name, Top::object_function(), DONT_ENUM);
+  // Allocate the function map first and then patch the prototype later
+  Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(fm);
+  empty_fm->set_instance_descriptors(*function_map_descriptors);
+  empty_fm->set_prototype(global_context()->object_function()->prototype());
+  empty_function->set_map(*empty_fm);
+  return empty_function;
+}
+
+
+void Genesis::CreateRoots() {
+  // Allocate the global context FixedArray first and then patch the
+  // closure and extension object later (we need the empty function
+  // and the global object, but in order to create those, we need the
+  // global context).
+  global_context_ =
+      Handle<Context>::cast(
+          GlobalHandles::Create(*Factory::NewGlobalContext()));
+  Top::set_context(*global_context());
+
+  // Allocate the message listeners object.
+  {
+    v8::NeanderArray listeners;
+    global_context()->set_message_listeners(*listeners.value());
+  }
+}
+
+
+Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
+    v8::Handle<v8::ObjectTemplate> global_template,
+    Handle<Object> global_object,
+    Handle<GlobalObject>* inner_global_out) {
+  // The argument global_template aka data is an ObjectTemplateInfo.
+  // It has a constructor pointer that points at global_constructor which is a
+  // FunctionTemplateInfo.
+  // The global_constructor is used to create or reinitialize the global_proxy.
+  // The global_constructor also has a prototype_template pointer that points at
+  // js_global_template which is an ObjectTemplateInfo.
+  // That in turn has a constructor pointer that points at
+  // js_global_constructor which is a FunctionTemplateInfo.
+  // js_global_constructor is used to make js_global_function
+  // js_global_function is used to make the new inner_global.
+  //
+  // --- G l o b a l ---
+  // Step 1: Create a fresh inner JSGlobalObject.
+  Handle<JSFunction> js_global_function;
+  Handle<ObjectTemplateInfo> js_global_template;
+  if (!global_template.IsEmpty()) {
+    // Get prototype template of the global_template.
+    Handle<ObjectTemplateInfo> data =
+        v8::Utils::OpenHandle(*global_template);
+    Handle<FunctionTemplateInfo> global_constructor =
+        Handle<FunctionTemplateInfo>(
+            FunctionTemplateInfo::cast(data->constructor()));
+    Handle<Object> proto_template(global_constructor->prototype_template());
+    if (!proto_template->IsUndefined()) {
+      js_global_template =
+          Handle<ObjectTemplateInfo>::cast(proto_template);
+    }
+  }
+
+  if (js_global_template.is_null()) {
+    Handle<String> name = Handle<String>(Heap::empty_symbol());
+    Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+    js_global_function =
+        Factory::NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
+                             JSGlobalObject::kSize, code, true);
+    // Change the constructor property of the prototype of the
+    // hidden global function to refer to the Object function.
+    Handle<JSObject> prototype =
+        Handle<JSObject>(
+            JSObject::cast(js_global_function->instance_prototype()));
+    SetProperty(prototype, Factory::constructor_symbol(),
+                Top::object_function(), NONE);
+  } else {
+    Handle<FunctionTemplateInfo> js_global_constructor(
+        FunctionTemplateInfo::cast(js_global_template->constructor()));
+    js_global_function =
+        Factory::CreateApiFunction(js_global_constructor,
+                                   Factory::InnerGlobalObject);
+  }
+
+  js_global_function->initial_map()->set_is_hidden_prototype();
+  Handle<GlobalObject> inner_global =
+      Factory::NewGlobalObject(js_global_function);
+  if (inner_global_out != NULL) {
+    *inner_global_out = inner_global;
+  }
+
+  // Step 2: create or re-initialize the global proxy object.
+  Handle<JSFunction> global_proxy_function;
+  if (global_template.IsEmpty()) {
+    Handle<String> name = Handle<String>(Heap::empty_symbol());
+    Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal));
+    global_proxy_function =
+        Factory::NewFunction(name, JS_GLOBAL_PROXY_TYPE,
+                             JSGlobalProxy::kSize, code, true);
+  } else {
+    Handle<ObjectTemplateInfo> data =
+        v8::Utils::OpenHandle(*global_template);
+    Handle<FunctionTemplateInfo> global_constructor(
+        FunctionTemplateInfo::cast(data->constructor()));
+    global_proxy_function =
+        Factory::CreateApiFunction(global_constructor,
+                                   Factory::OuterGlobalObject);
+  }
+
+  Handle<String> global_name = Factory::LookupAsciiSymbol("global");
+  global_proxy_function->shared()->set_instance_class_name(*global_name);
+  global_proxy_function->initial_map()->set_is_access_check_needed(true);
+
+  // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
+  // Return the global proxy.
+
+  if (global_object.location() != NULL) {
+    ASSERT(global_object->IsJSGlobalProxy());
+    return ReinitializeJSGlobalProxy(
+        global_proxy_function,
+        Handle<JSGlobalProxy>::cast(global_object));
+  } else {
+    return Handle<JSGlobalProxy>::cast(
+        Factory::NewJSObject(global_proxy_function, TENURED));
+  }
+}
+
+
+void Genesis::HookUpGlobalProxy(Handle<GlobalObject> inner_global,
+                                Handle<JSGlobalProxy> global_proxy) {
+  // Set the global context for the global object.
+  inner_global->set_global_context(*global_context());
+  inner_global->set_global_receiver(*global_proxy);
+  global_proxy->set_context(*global_context());
+  global_context()->set_global_proxy(*global_proxy);
+}
+
+
+void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
+  Handle<GlobalObject> inner_global_from_snapshot(
+      GlobalObject::cast(global_context_->extension()));
+  Handle<JSBuiltinsObject> builtins_global(global_context_->builtins());
+  global_context_->set_extension(*inner_global);
+  global_context_->set_global(*inner_global);
+  global_context_->set_security_token(*inner_global);
+  static const PropertyAttributes attributes =
+      static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+  ForceSetProperty(builtins_global,
+                   Factory::LookupAsciiSymbol("global"),
+                   inner_global,
+                   attributes);
+  // Setup the reference from the global object to the builtins object.
+  JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global);
+  TransferNamedProperties(inner_global_from_snapshot, inner_global);
+  TransferIndexedProperties(inner_global_from_snapshot, inner_global);
+}
+
+
+// This is only called if we are not using snapshots. The equivalent
+// work in the snapshot case is done in HookUpInnerGlobal.
+void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
+                               Handle<JSFunction> empty_function) {
+  // --- G l o b a l  C o n t e x t ---
+  // Use the empty function as closure (no scope info).
+  global_context()->set_closure(*empty_function);
+  global_context()->set_fcontext(*global_context());
+  global_context()->set_previous(NULL);
+  // Set extension and global object.
+  global_context()->set_extension(*inner_global);
+  global_context()->set_global(*inner_global);
+  // Security setup: Set the security token of the global object to
+  // its the inner global. This makes the security check between two
+  // different contexts fail by default even in case of global
+  // object reinitialization.
+  global_context()->set_security_token(*inner_global);
+
+  Handle<String> object_name = Handle<String>(Heap::Object_symbol());
+  SetProperty(inner_global, object_name, Top::object_function(), DONT_ENUM);

   Handle<JSObject> global = Handle<JSObject>(global_context()->global());
@@ -791,8 +849,12 @@ bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debugger::set_compiling_natives(true);
 #endif
-  bool result =
-      CompileScriptCached(name, source, &natives_cache, NULL, true);
+  bool result = CompileScriptCached(name,
+                                    source,
+                                    NULL,
+                                    NULL,
+                                    Handle<Context>(Top::context()),
+                                    true);
   ASSERT(Top::has_pending_exception() != result);
   if (!result) Top::clear_pending_exception();
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -806,46 +868,46 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
                                   Handle<String> source,
                                   SourceCodeCache* cache,
                                   v8::Extension* extension,
+                                  Handle<Context> top_context,
                                   bool use_runtime_context) {
   HandleScope scope;
-  Handle<JSFunction> boilerplate;
+  Handle<SharedFunctionInfo> function_info;

   // If we can't find the function in the cache, we compile a new
   // function and insert it into the cache.
-  if (!cache->Lookup(name, &boilerplate)) {
+  if (cache == NULL || !cache->Lookup(name, &function_info)) {
     ASSERT(source->IsAsciiRepresentation());
     Handle<String> script_name = Factory::NewStringFromUtf8(name);
-    boilerplate =
-        Compiler::Compile(
-            source,
-            script_name,
-            0,
-            0,
-            extension,
-            NULL,
-            Handle<String>::null(),
-            use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE);
-    if (boilerplate.is_null()) return false;
-    cache->Add(name, boilerplate);
+    function_info = Compiler::Compile(
+        source,
+        script_name,
+        0,
+        0,
+        extension,
+        NULL,
+        Handle<String>::null(),
+        use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE);
+    if (function_info.is_null()) return false;
+    if (cache != NULL) cache->Add(name, function_info);
   }

   // Setup the function context. Conceptually, we should clone the
   // function before overwriting the context but since we're in a
   // single-threaded environment it is not strictly necessary.
-  ASSERT(Top::context()->IsGlobalContext());
+  ASSERT(top_context->IsGlobalContext());
   Handle<Context> context =
       Handle<Context>(use_runtime_context
-                      ? Top::context()->runtime_context()
-                      : Top::context());
+                      ? Handle<Context>(top_context->runtime_context())
+                      : top_context);
   Handle<JSFunction> fun =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+      Factory::NewFunctionFromSharedFunctionInfo(function_info, context);

   // Call function using either the runtime object or the global
   // object as the receiver. Provide no parameters.
   Handle<Object> receiver =
       Handle<Object>(use_runtime_context
-                     ? Top::context()->builtins()
-                     : Top::context()->global());
+                     ? top_context->builtins()
+                     : top_context->global());
   bool has_pending_exception;
   Handle<Object> result =
       Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
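After this change the natives no longer go through a cache at all (CompileNative passes NULL), so CompileScriptCached must treat the cache as optional and take its context from the new top_context parameter instead of reading Top::context(). The caching skeleton reduced to standard C++, with a map standing in for SourceCodeCache and a string for the compiled SharedFunctionInfo (all stand-ins, not V8's API):

    #include <map>
    #include <string>

    using FunctionInfo = std::string;
    using Cache = std::map<std::string, FunctionInfo>;

    bool CompileScriptCached(const std::string& name,
                             const std::string& source,
                             Cache* cache,  // may be null, as for the natives
                             FunctionInfo* out) {
      if (cache != nullptr) {
        Cache::const_iterator it = cache->find(name);
        if (it != cache->end()) {  // cache hit: skip compilation entirely
          *out = it->second;
          return true;
        }
      }
      if (source.empty()) return false;  // stand-in for a compile failure
      *out = "compiled:" + source;
      if (cache != nullptr) (*cache)[name] = *out;  // only cache successes
      return true;
    }

    int main() {
      Cache extensions_cache;
      FunctionInfo info;
      bool ext_ok = CompileScriptCached("gc", "function gc() {}",
                                        &extensions_cache, &info);
      bool natives_ok = CompileScriptCached("date", "/* date.js */",
                                            nullptr, &info);  // uncached path
      return ext_ok && natives_ok ? 0 : 1;
    }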
@@ -1047,7 +1109,7 @@ bool Genesis::InstallNatives() {
     // Allocate the empty script.
     Handle<Script> script = Factory::NewScript(Factory::empty_string());
     script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
-    global_context()->set_empty_script(*script);
+    Heap::public_set_empty_script(*script);
   }
   {
     // Builtin function for OpaqueReference -- a JSValue-based object,
@@ -1063,48 +1125,23 @@ bool Genesis::InstallNatives() {
     global_context()->set_opaque_reference_function(*opaque_reference_fun);
   }

-  if (FLAG_natives_file == NULL) {
-    // Without natives file, install default natives.
-    for (int i = Natives::GetDelayCount();
-         i < Natives::GetBuiltinsCount();
-         i++) {
-      if (!CompileBuiltin(i)) return false;
-      // TODO(ager): We really only need to install the JS builtin
-      // functions on the builtins object after compiling and running
-      // runtime.js.
-      if (!InstallJSBuiltins(builtins)) return false;
-    }
-
-    // Setup natives with lazy loading.
-    SetupLazy(Handle<JSFunction>(global_context()->date_function()),
-              Natives::GetIndex("date"),
-              Top::global_context(),
-              Handle<Context>(Top::context()->runtime_context()));
-    SetupLazy(Handle<JSFunction>(global_context()->regexp_function()),
-              Natives::GetIndex("regexp"),
-              Top::global_context(),
-              Handle<Context>(Top::context()->runtime_context()));
-    SetupLazy(Handle<JSObject>(global_context()->json_object()),
-              Natives::GetIndex("json"),
-              Top::global_context(),
-              Handle<Context>(Top::context()->runtime_context()));
-  } else if (strlen(FLAG_natives_file) != 0) {
-    // Otherwise install natives from natives file if file exists and
-    // compiles.
-    bool exists;
-    Vector<const char> source = ReadFile(FLAG_natives_file, &exists);
-    Handle<String> source_string = Factory::NewStringFromAscii(source);
-    if (source.is_empty()) return false;
-    bool result = CompileNative(CStrVector(FLAG_natives_file), source_string);
-    if (!result) return false;
-  } else {
-    // Empty natives file name - do not install any natives.
+  if (FLAG_disable_native_files) {
     PrintF("Warning: Running without installed natives!\n");
     return true;
   }

+  // Install natives.
+  for (int i = Natives::GetDebuggerCount();
+       i < Natives::GetBuiltinsCount();
+       i++) {
+    Vector<const char> name = Natives::GetScriptName(i);
+    if (!CompileBuiltin(i)) return false;
+    // TODO(ager): We really only need to install the JS builtin
+    // functions on the builtins object after compiling and running
+    // runtime.js.
+    if (!InstallJSBuiltins(builtins)) return false;
+  }
+
   InstallNativeFunctions();

   // Install Function.prototype.call and apply.
@@ -1143,14 +1180,29 @@ bool Genesis::InstallNatives() {
 #ifdef DEBUG
   builtins->Verify();
 #endif
+
+  return true;
+}
+
+
+int BootstrapperActive::nesting_ = 0;
+
+
+bool Bootstrapper::InstallExtensions(Handle<Context> global_context,
+                                     v8::ExtensionConfiguration* extensions) {
+  BootstrapperActive active;
+  SaveContext saved_context;
+  Top::set_context(*global_context);
+  if (!Genesis::InstallExtensions(global_context, extensions)) return false;
+  Genesis::InstallSpecialObjects(global_context);
   return true;
 }

-bool Genesis::InstallSpecialObjects() {
+void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
   HandleScope scope;
   Handle<JSGlobalObject> js_global(
-      JSGlobalObject::cast(global_context()->global()));
+      JSGlobalObject::cast(global_context->global()));
   // Expose the natives in global if a name for it is specified.
   if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
     Handle<String> natives_string =
@@ -1173,13 +1225,12 @@ bool Genesis::InstallSpecialObjects() {
   if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
     // If loading fails we just bail out without installing the
     // debugger but without tanking the whole context.
-    if (!Debug::Load())
-      return true;
+    if (!Debug::Load()) return;
     // Set the security token for the debugger context to the same as
     // the shell global context to allow calling between these (otherwise
     // exposing debug global object doesn't make much sense).
     Debug::debug_context()->set_security_token(
-        global_context()->security_token());
+        global_context->security_token());

     Handle<String> debug_string =
         Factory::LookupAsciiSymbol(FLAG_expose_debug_as);
@@ -1187,19 +1238,18 @@ bool Genesis::InstallSpecialObjects() {
         Handle<Object>(Debug::debug_context()->global_proxy()), DONT_ENUM);
   }
 #endif
-  return true;
 }

-bool Genesis::InstallExtensions(v8::ExtensionConfiguration* extensions) {
+bool Genesis::InstallExtensions(Handle<Context> global_context,
+                                v8::ExtensionConfiguration* extensions) {
   // Clear coloring of extension list
   v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
   while (current != NULL) {
     current->set_state(v8::UNVISITED);
     current = current->next();
   }
-  // Install auto extensions
+  // Install auto extensions.
   current = v8::RegisteredExtension::first_extension();
   while (current != NULL) {
     if (current->extension()->auto_enable())
@@ -1263,7 +1313,9 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
   Handle<String> source_code = Factory::NewStringFromAscii(source);
   bool result = CompileScriptCached(CStrVector(extension->name()),
                                     source_code,
-                                    &extensions_cache, extension,
+                                    &extensions_cache,
+                                    extension,
+                                    Handle<Context>(Top::context()),
                                     false);
   ASSERT(Top::has_pending_exception() != result);
   if (!result) {
@@ -1294,7 +1346,7 @@ bool Genesis::ConfigureGlobalObjects(
     v8::Handle<v8::ObjectTemplate> global_proxy_template) {
   Handle<JSObject> global_proxy(
       JSObject::cast(global_context()->global_proxy()));
-  Handle<JSObject> js_global(JSObject::cast(global_context()->global()));
+  Handle<JSObject> inner_global(JSObject::cast(global_context()->global()));

   if (!global_proxy_template.IsEmpty()) {
     // Configure the global proxy object.
@@ -1308,11 +1360,11 @@ bool Genesis::ConfigureGlobalObjects(
     if (!proxy_constructor->prototype_template()->IsUndefined()) {
       Handle<ObjectTemplateInfo> inner_data(
           ObjectTemplateInfo::cast(proxy_constructor->prototype_template()));
-      if (!ConfigureApiObject(js_global, inner_data)) return false;
+      if (!ConfigureApiObject(inner_global, inner_data)) return false;
     }
   }

-  SetObjectPrototype(global_proxy, js_global);
+  SetObjectPrototype(global_proxy, inner_global);
   return true;
 }
@@ -1366,15 +1418,13 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
         // If the property is already there we skip it
         if (result.IsProperty()) continue;
         HandleScope inner;
-        Handle<DescriptorArray> inst_descs =
-            Handle<DescriptorArray>(to->map()->instance_descriptors());
+        ASSERT(!to->HasFastProperties());
+        // Add to dictionary.
         Handle<String> key = Handle<String>(descs->GetKey(i));
-        Handle<Object> entry = Handle<Object>(descs->GetCallbacksObject(i));
-        inst_descs = Factory::CopyAppendProxyDescriptor(inst_descs,
-                                                        key,
-                                                        entry,
-                                                        details.attributes());
-        to->map()->set_instance_descriptors(*inst_descs);
+        Handle<Object> callbacks(descs->GetCallbacksObject(i));
+        PropertyDetails d =
+            PropertyDetails(details.attributes(), CALLBACKS, details.index());
+        SetNormalizedProperty(to, key, callbacks, d);
         break;
       }
       case MAP_TRANSITION:
@@ -1459,32 +1509,51 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
 Genesis::Genesis(Handle<Object> global_object,
                  v8::Handle<v8::ObjectTemplate> global_template,
                  v8::ExtensionConfiguration* extensions) {
-  // Link this genesis object into the stacked genesis chain. This
-  // must be done before any early exits because the destructor
-  // will always do unlinking.
-  previous_ = current_;
-  current_  = this;
   result_ = Handle<Context>::null();
-
   // If V8 isn't running and cannot be initialized, just return.
   if (!V8::IsRunning() && !V8::Initialize(NULL)) return;

   // Before creating the roots we must save the context and restore it
   // on all function exits.
   HandleScope scope;
-  SaveContext context;
-
-  CreateRoots(global_template, global_object);
-
-  if (!InstallNatives()) return;
-
-  MakeFunctionInstancePrototypeWritable();
-
-  if (!ConfigureGlobalObjects(global_template)) return;
-
-  if (!InstallExtensions(extensions)) return;
-
-  if (!InstallSpecialObjects()) return;
+  SaveContext saved_context;
+
+  Handle<Context> new_context = Snapshot::NewContextFromSnapshot();
+  if (!new_context.is_null()) {
+    global_context_ =
+        Handle<Context>::cast(GlobalHandles::Create(*new_context));
+    Top::set_context(*global_context_);
+    i::Counters::contexts_created_by_snapshot.Increment();
+    result_ = global_context_;
+    JSFunction* empty_function =
+        JSFunction::cast(result_->function_map()->prototype());
+    empty_function_ = Handle<JSFunction>(empty_function);
+    Handle<GlobalObject> inner_global;
+    Handle<JSGlobalProxy> global_proxy =
+        CreateNewGlobals(global_template,
+                         global_object,
+                         &inner_global);
+    HookUpGlobalProxy(inner_global, global_proxy);
+    HookUpInnerGlobal(inner_global);
+    if (!ConfigureGlobalObjects(global_template)) return;
+  } else {
+    // We get here if there was no context snapshot.
+    CreateRoots();
+    Handle<JSFunction> empty_function = CreateEmptyFunction();
+    Handle<GlobalObject> inner_global;
+    Handle<JSGlobalProxy> global_proxy =
+        CreateNewGlobals(global_template, global_object, &inner_global);
+    HookUpGlobalProxy(inner_global, global_proxy);
+    InitializeGlobal(inner_global, empty_function);
+    if (!InstallNatives()) return;
+
+    MakeFunctionInstancePrototypeWritable();
+
+    if (!ConfigureGlobalObjects(global_template)) return;
+    i::Counters::contexts_created_from_scratch.Increment();
+  }

   result_ = global_context_;
 }
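The rewritten constructor makes the partial snapshot the fast path: deserialize the context, hook the API-supplied globals into it, and count it; only when no snapshot is baked in does it fall back to building every root object and compiling the natives. Roughly, with hypothetical stand-ins for the V8 calls (the real ones are Snapshot::NewContextFromSnapshot and the new Genesis helpers):

    #include <cstdio>

    // Stand-in; pretend no snapshot is compiled into this binary.
    bool NewContextFromSnapshot() { return false; }

    int main() {
      if (NewContextFromSnapshot()) {
        // Deserialized: only wire up the API-provided global objects.
        std::puts("CreateNewGlobals -> HookUpGlobalProxy -> HookUpInnerGlobal");
      } else {
        // From scratch: build roots, the empty function, globals, natives.
        std::puts("CreateRoots -> CreateEmptyFunction -> CreateNewGlobals");
        std::puts("-> HookUpGlobalProxy -> InitializeGlobal -> InstallNatives");
      }
      return 0;
    }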
@@ -1494,46 +1563,46 @@ Genesis::Genesis(Handle<Object> global_object,

 // Reserve space for statics needing saving and restoring.
 int Bootstrapper::ArchiveSpacePerThread() {
-  return Genesis::ArchiveSpacePerThread();
+  return BootstrapperActive::ArchiveSpacePerThread();
 }


 // Archive statics that are thread local.
 char* Bootstrapper::ArchiveState(char* to) {
-  return Genesis::ArchiveState(to);
+  return BootstrapperActive::ArchiveState(to);
 }


 // Restore statics that are thread local.
 char* Bootstrapper::RestoreState(char* from) {
-  return Genesis::RestoreState(from);
+  return BootstrapperActive::RestoreState(from);
 }


 // Called when the top-level V8 mutex is destroyed.
 void Bootstrapper::FreeThreadResources() {
-  ASSERT(Genesis::current() == NULL);
+  ASSERT(!BootstrapperActive::IsActive());
 }


 // Reserve space for statics needing saving and restoring.
-int Genesis::ArchiveSpacePerThread() {
-  return sizeof(current_);
+int BootstrapperActive::ArchiveSpacePerThread() {
+  return sizeof(nesting_);
 }


 // Archive statics that are thread local.
-char* Genesis::ArchiveState(char* to) {
-  *reinterpret_cast<Genesis**>(to) = current_;
-  current_ = NULL;
-  return to + sizeof(current_);
+char* BootstrapperActive::ArchiveState(char* to) {
+  *reinterpret_cast<int*>(to) = nesting_;
+  nesting_ = 0;
+  return to + sizeof(nesting_);
 }


 // Restore statics that are thread local.
-char* Genesis::RestoreState(char* from) {
-  current_ = *reinterpret_cast<Genesis**>(from);
-  return from + sizeof(current_);
+char* BootstrapperActive::RestoreState(char* from) {
+  nesting_ = *reinterpret_cast<int*>(from);
+  return from + sizeof(nesting_);
 }

 } }  // namespace v8::internal

32
deps/v8/src/bootstrapper.h

@@ -32,6 +32,24 @@
 namespace v8 {
 namespace internal {

+class BootstrapperActive BASE_EMBEDDED {
+ public:
+  BootstrapperActive() { nesting_++; }
+  ~BootstrapperActive() { nesting_--; }
+
+  // Support for thread preemption.
+  static int ArchiveSpacePerThread();
+  static char* ArchiveState(char* to);
+  static char* RestoreState(char* from);
+
+ private:
+  static bool IsActive() { return nesting_ != 0; }
+
+  static int nesting_;
+
+  friend class Bootstrapper;
+};
+
 // The Boostrapper is the public interface for creating a JavaScript global
 // context.
 class Bootstrapper : public AllStatic {
@@ -50,17 +68,17 @@ class Bootstrapper : public AllStatic {
   // Detach the environment from its outer global object.
   static void DetachGlobal(Handle<Context> env);

+  // Reattach an outer global object to an environment.
+  static void ReattachGlobal(Handle<Context> env, Handle<Object> global_object);
+
   // Traverses the pointers for memory management.
   static void Iterate(ObjectVisitor* v);

-  // Accessors for the native scripts cache. Used in lazy loading.
+  // Accessor for the native scripts source code.
   static Handle<String> NativesSourceLookup(int index);
-  static bool NativesCacheLookup(Vector<const char> name,
-                                 Handle<JSFunction>* handle);
-  static void NativesCacheAdd(Vector<const char> name, Handle<JSFunction> fun);

   // Tells whether bootstrapping is active.
-  static bool IsActive();
+  static bool IsActive() { return BootstrapperActive::IsActive(); }

   // Encoding/decoding support for fixup flags.
   class FixupFlagsUseCodeObject: public BitField<bool, 0, 1> {};
@@ -75,6 +93,10 @@ class Bootstrapper : public AllStatic {
   // This will allocate a char array that is deleted when V8 is shut down.
   // It should only be used for strictly finite allocations.
   static char* AllocateAutoDeletedArray(int bytes);
+
+  // Used for new context creation.
+  static bool InstallExtensions(Handle<Context> global_context,
+                                v8::ExtensionConfiguration* extensions);
 };
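BootstrapperActive replaces the old Genesis::current_ chain with a plain RAII nesting counter: bootstrapping is "active" while any such scope object is alive, and re-entrant environment creation simply increments the count. The class is small enough to exercise almost verbatim outside V8 (IsActive is made public here for the demonstration):

    #include <cassert>

    class BootstrapperActive {
     public:
      BootstrapperActive() { nesting_++; }
      ~BootstrapperActive() { nesting_--; }
      static bool IsActive() { return nesting_ != 0; }

     private:
      static int nesting_;
    };

    int BootstrapperActive::nesting_ = 0;

    int main() {
      assert(!BootstrapperActive::IsActive());
      {
        BootstrapperActive outer;
        BootstrapperActive inner;  // re-entrant bootstrapping nests cleanly
        assert(BootstrapperActive::IsActive());
      }
      assert(!BootstrapperActive::IsActive());  // fully unwound
      return 0;
    }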

44
deps/v8/src/builtins.cc

@@ -443,6 +443,38 @@ BUILTIN(ArrayPop) {
 }


+static FixedArray* LeftTrimFixedArray(FixedArray* elms) {
+  // For now this trick is only applied to fixed arrays in new space.
+  // In large object space the object's start must coincide with chunk
+  // and thus the trick is just not applicable.
+  // In old space we do not use this trick to avoid dealing with
+  // remembered sets.
+  ASSERT(Heap::new_space()->Contains(elms));
+
+  Object** former_map =
+      HeapObject::RawField(elms, FixedArray::kMapOffset);
+  Object** former_length =
+      HeapObject::RawField(elms, FixedArray::kLengthOffset);
+  Object** former_first =
+      HeapObject::RawField(elms, FixedArray::kHeaderSize);
+  // Check that we don't forget to copy all the bits.
+  STATIC_ASSERT(FixedArray::kMapOffset + 2 * kPointerSize
+      == FixedArray::kHeaderSize);
+
+  int len = elms->length();
+
+  *former_first = reinterpret_cast<Object*>(len - 1);
+  *former_length = Heap::fixed_array_map();
+  // Technically in new space this write might be omitted (except for
+  // debug mode which iterates through the heap), but to play safer
+  // we still do it.
+  *former_map = Heap::raw_unchecked_one_pointer_filler_map();
+
+  ASSERT(elms->address() + kPointerSize == (elms + kPointerSize)->address());
+  return elms + kPointerSize;
+}
+
+
 BUILTIN(ArrayShift) {
   Object* receiver = *args.receiver();
   FixedArray* elms = NULL;
@@ -462,10 +494,14 @@ BUILTIN(ArrayShift) {
     first = Heap::undefined_value();
   }

-  // Shift the elements.
-  AssertNoAllocation no_gc;
-  MoveElements(&no_gc, elms, 0, elms, 1, len - 1);
-  elms->set(len - 1, Heap::the_hole_value());
+  if (Heap::new_space()->Contains(elms)) {
+    array->set_elements(LeftTrimFixedArray(elms));
+  } else {
+    // Shift the elements.
+    AssertNoAllocation no_gc;
+    MoveElements(&no_gc, elms, 0, elms, 1, len - 1);
+    elms->set(len - 1, Heap::the_hole_value());
+  }

   // Set the length.
   array->set_length(Smi::FromInt(len - 1));
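For new-space arrays, LeftTrimFixedArray turns Array.prototype.shift from an O(n) element move into an O(1) header rewrite: the array's start is bumped one word to the right, the old length slot becomes the new map slot, the old first element becomes the new length, and a one-word filler map is written over the abandoned word so heap iterators can still skip it. A toy model of those three writes over a plain word buffer (the one-word-per-field layout is a simplification of the real FixedArray):

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t kFixedArrayMap = 0xA1;  // fake map values
      const intptr_t kFillerMap = 0xF1;
      const int kLen = 4;

      // Layout: [map][length][e0][e1][e2][e3]
      intptr_t words[2 + kLen] = {kFixedArrayMap, kLen, 100, 101, 102, 103};

      // The three writes from LeftTrimFixedArray, in the same order:
      words[2] = kLen - 1;        // former first element -> new length
      words[1] = kFixedArrayMap;  // former length slot   -> new map
      words[0] = kFillerMap;      // former map slot      -> one-word filler

      intptr_t* trimmed = words + 1;  // the array now starts one word later
      assert(trimmed[0] == kFixedArrayMap);
      assert(trimmed[1] == kLen - 1);
      assert(trimmed[2] == 101);  // old e1 is the new first element
      return 0;
    }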

9
deps/v8/src/circular-queue-inl.h

@@ -82,11 +82,10 @@ Record* CircularQueue<Record>::Next(Record* curr) {

 void* SamplingCircularQueue::Enqueue() {
-  Cell* enqueue_pos = reinterpret_cast<Cell*>(
-      Thread::GetThreadLocal(producer_key_));
-  WrapPositionIfNeeded(&enqueue_pos);
-  Thread::SetThreadLocal(producer_key_, enqueue_pos + record_size_);
-  return enqueue_pos;
+  WrapPositionIfNeeded(&producer_pos_->enqueue_pos);
+  void* result = producer_pos_->enqueue_pos;
+  producer_pos_->enqueue_pos += record_size_;
+  return result;
 }

86
deps/v8/src/circular-queue.cc

@@ -52,52 +52,44 @@ SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
     buffer_[i] = kClear;
   }
   buffer_[buffer_size_] = kEnd;
+
+  // Layout producer and consumer position pointers each on their own
+  // cache lines to avoid cache lines thrashing due to simultaneous
+  // updates of positions by different processor cores.
+  const int positions_size =
+      RoundUp(1, kProcessorCacheLineSize) +
+      RoundUp(sizeof(ProducerPosition), kProcessorCacheLineSize) +
+      RoundUp(sizeof(ConsumerPosition), kProcessorCacheLineSize);
+  positions_ = NewArray<byte>(positions_size);
+  producer_pos_ = reinterpret_cast<ProducerPosition*>(
+      RoundUp(positions_, kProcessorCacheLineSize));
+  producer_pos_->enqueue_pos = buffer_;
+  consumer_pos_ = reinterpret_cast<ConsumerPosition*>(
+      reinterpret_cast<byte*>(producer_pos_) + kProcessorCacheLineSize);
+  ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <=
+         positions_ + positions_size);
+  consumer_pos_->dequeue_chunk_pos = buffer_;
+  consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance_;
+  consumer_pos_->dequeue_pos = NULL;
 }


 SamplingCircularQueue::~SamplingCircularQueue() {
+  DeleteArray(positions_);
   DeleteArray(buffer_);
 }


-void SamplingCircularQueue::SetUpProducer() {
-  producer_key_ = Thread::CreateThreadLocalKey();
-  Thread::SetThreadLocal(producer_key_, buffer_);
-}
-
-
-void SamplingCircularQueue::TearDownProducer() {
-  Thread::DeleteThreadLocalKey(producer_key_);
-}
-
-
-void SamplingCircularQueue::SetUpConsumer() {
-  consumer_key_ = Thread::CreateThreadLocalKey();
-  ConsumerPosition* cp = new ConsumerPosition;
-  cp->dequeue_chunk_pos = buffer_;
-  cp->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance_;
-  cp->dequeue_pos = NULL;
-  Thread::SetThreadLocal(consumer_key_, cp);
-}
-
-
-void SamplingCircularQueue::TearDownConsumer() {
-  delete reinterpret_cast<ConsumerPosition*>(
-      Thread::GetThreadLocal(consumer_key_));
-  Thread::DeleteThreadLocalKey(consumer_key_);
-}
-
-
 void* SamplingCircularQueue::StartDequeue() {
-  ConsumerPosition* cp = reinterpret_cast<ConsumerPosition*>(
-      Thread::GetThreadLocal(consumer_key_));
-  if (cp->dequeue_pos != NULL) {
-    return cp->dequeue_pos;
+  if (consumer_pos_->dequeue_pos != NULL) {
+    return consumer_pos_->dequeue_pos;
   } else {
-    if (*cp->dequeue_chunk_poll_pos != kClear) {
-      cp->dequeue_pos = cp->dequeue_chunk_pos;
-      cp->dequeue_end_pos = cp->dequeue_pos + chunk_size_;
-      return cp->dequeue_pos;
+    if (*consumer_pos_->dequeue_chunk_poll_pos != kClear) {
+      consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos;
+      consumer_pos_->dequeue_end_pos = consumer_pos_->dequeue_pos + chunk_size_;
+      return consumer_pos_->dequeue_pos;
     } else {
       return NULL;
     }
@@ -106,25 +98,21 @@ void* SamplingCircularQueue::StartDequeue() {

 void SamplingCircularQueue::FinishDequeue() {
-  ConsumerPosition* cp = reinterpret_cast<ConsumerPosition*>(
-      Thread::GetThreadLocal(consumer_key_));
-  cp->dequeue_pos += record_size_;
-  if (cp->dequeue_pos < cp->dequeue_end_pos) return;
+  consumer_pos_->dequeue_pos += record_size_;
+  if (consumer_pos_->dequeue_pos < consumer_pos_->dequeue_end_pos) return;
   // Move to next chunk.
-  cp->dequeue_pos = NULL;
-  *cp->dequeue_chunk_pos = kClear;
-  cp->dequeue_chunk_pos += chunk_size_;
-  WrapPositionIfNeeded(&cp->dequeue_chunk_pos);
-  cp->dequeue_chunk_poll_pos += chunk_size_;
-  WrapPositionIfNeeded(&cp->dequeue_chunk_poll_pos);
+  consumer_pos_->dequeue_pos = NULL;
+  *consumer_pos_->dequeue_chunk_pos = kClear;
+  consumer_pos_->dequeue_chunk_pos += chunk_size_;
+  WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_pos);
+  consumer_pos_->dequeue_chunk_poll_pos += chunk_size_;
+  WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_poll_pos);
 }


 void SamplingCircularQueue::FlushResidualRecords() {
-  ConsumerPosition* cp = reinterpret_cast<ConsumerPosition*>(
-      Thread::GetThreadLocal(consumer_key_));
   // Eliminate producer / consumer distance.
-  cp->dequeue_chunk_poll_pos = cp->dequeue_chunk_pos;
+  consumer_pos_->dequeue_chunk_poll_pos = consumer_pos_->dequeue_chunk_pos;
 }
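The queue's producer and consumer cursors move out of thread-local storage into a single heap block, with each position struct rounded up to its own cache line so the sampler core and the analyzer core never write the same line (no false sharing), and Enqueue becomes a plain cursor bump. The intent, expressed with standard alignas (64 bytes is an assumption; V8 uses kProcessorCacheLineSize):

    #include <cstddef>
    #include <cstdio>

    const std::size_t kCacheLine = 64;  // assumed line size

    struct ProducerPosition { char* enqueue_pos; };
    struct ConsumerPosition { char* dequeue_chunk_pos; char* dequeue_pos; };

    // One allocation, two cache lines: writes by the producer never dirty
    // the consumer's line and vice versa.
    struct Positions {
      alignas(kCacheLine) ProducerPosition producer;
      alignas(kCacheLine) ConsumerPosition consumer;
    };

    // Enqueue after the change: no TLS lookup, just advance the cursor.
    void* Enqueue(Positions* p, std::size_t record_size) {
      void* result = p->producer.enqueue_pos;
      p->producer.enqueue_pos += record_size;
      return result;
    }

    int main() {
      static char buffer[1024];
      Positions pos;
      pos.producer.enqueue_pos = buffer;
      Enqueue(&pos, 16);
      std::printf("consumer offset: %zu bytes\n",
                  offsetof(Positions, consumer));  // 64: its own line
      return 0;
    }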

15
deps/v8/src/circular-queue.h

@@ -76,15 +76,11 @@ class SamplingCircularQueue {
                         int buffer_size_in_chunks);
   ~SamplingCircularQueue();

-  // Executed on the producer (sampler) or application thread.
-  void SetUpProducer();
   // Enqueue returns a pointer to a memory location for storing the next
   // record.
   INLINE(void* Enqueue());
-  void TearDownProducer();

   // Executed on the consumer (analyzer) thread.
-  void SetUpConsumer();
   // StartDequeue returns a pointer to a memory location for retrieving
   // the next record. After the record had been read by a consumer,
   // FinishDequeue must be called. Until that moment, subsequent calls
@@ -95,7 +91,6 @@ class SamplingCircularQueue {
   // the queue must be notified whether producing has been finished in order
   // to process remaining records from the buffer.
   void FlushResidualRecords();
-  void TearDownConsumer();

   typedef AtomicWord Cell;
   // Reserved values for the first cell of a record.
@@ -103,6 +98,9 @@ class SamplingCircularQueue {
   static const Cell kEnd = -1;    // Marks the end of the buffer.

  private:
+  struct ProducerPosition {
+    Cell* enqueue_pos;
+  };
   struct ConsumerPosition {
     Cell* dequeue_chunk_pos;
     Cell* dequeue_chunk_poll_pos;
@@ -118,10 +116,9 @@ class SamplingCircularQueue {
   const int buffer_size_;
   const int producer_consumer_distance_;
   Cell* buffer_;
-  // Store producer and consumer data in TLS to avoid modifying the
-  // same CPU cache line from two threads simultaneously.
-  Thread::LocalStorageKey consumer_key_;
-  Thread::LocalStorageKey producer_key_;
+  byte* positions_;
+  ProducerPosition* producer_pos_;
+  ConsumerPosition* consumer_pos_;
 };
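Replacing the two thread-local keys, positions_ points at one backing allocation that holds both cursor structs. A sketch of the layout idea, assuming the allocation is padded so the producer and consumer cursors sit on different cache lines and the old TLS rationale still holds; the constructor doing this is not part of this hunk, and the 64-byte line size is an assumption:

    static const size_t kCacheLineSize = 64;  // assumed line size

    // One allocation, two cache lines: the sampler thread writes only the
    // first line, the analyzer thread writes only the second, so neither
    // thread dirties the other's cache line.
    byte* positions = NewArray<byte>(2 * kCacheLineSize);
    ProducerPosition* producer_pos =
        reinterpret_cast<ProducerPosition*>(positions);
    ConsumerPosition* consumer_pos =
        reinterpret_cast<ConsumerPosition*>(positions + kCacheLineSize);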

36
deps/v8/src/codegen.cc

@@ -66,38 +66,6 @@ Comment::~Comment() {
 CodeGenerator* CodeGeneratorScope::top_ = NULL;


-DeferredCode::DeferredCode()
-    : masm_(CodeGeneratorScope::Current()->masm()),
-      statement_position_(masm_->current_statement_position()),
-      position_(masm_->current_position()) {
-  ASSERT(statement_position_ != RelocInfo::kNoPosition);
-  ASSERT(position_ != RelocInfo::kNoPosition);
-
-  CodeGeneratorScope::Current()->AddDeferred(this);
-#ifdef DEBUG
-  comment_ = "";
-#endif
-
-  // Copy the register locations from the code generator's frame.
-  // These are the registers that will be spilled on entry to the
-  // deferred code and restored on exit.
-  VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
-  int sp_offset = frame->fp_relative(frame->stack_pointer_);
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int loc = frame->register_location(i);
-    if (loc == VirtualFrame::kIllegalIndex) {
-      registers_[i] = kIgnore;
-    } else if (frame->elements_[loc].is_synced()) {
-      // Needs to be restored on exit but not saved on entry.
-      registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
-    } else {
-      int offset = frame->fp_relative(loc);
-      registers_[i] = (offset < sp_offset) ? kPush : offset;
-    }
-  }
-}
-
-
 void CodeGenerator::ProcessDeferred() {
   while (!deferred_.is_empty()) {
     DeferredCode* code = deferred_.RemoveLast();
@@ -336,8 +304,8 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
           array->set_undefined(j++);
         }
       } else {
-        Handle<JSFunction> function =
-            Compiler::BuildBoilerplate(node->fun(), script(), this);
+        Handle<SharedFunctionInfo> function =
+            Compiler::BuildFunctionInfo(node->fun(), script(), this);
         // Check for stack-overflow exception.
         if (HasStackOverflow()) return;
         array->set(j++, *function);

31
deps/v8/src/codegen.h

@@ -31,7 +31,7 @@
 #include "ast.h"
 #include "code-stubs.h"
 #include "runtime.h"
-#include "number-info.h"
+#include "type-info.h"

 // Include the declaration of the architecture defined class CodeGenerator.
 // The contract to the shared code is that the the CodeGenerator is a subclass
@@ -58,7 +58,7 @@
 //   ProcessDeferred
 //   Generate
 //   ComputeLazyCompile
-//   BuildBoilerplate
+//   BuildFunctionInfo
 //   ComputeCallInitialize
 //   ComputeCallInitializeInLoop
 //   ProcessDeclarations
@@ -346,8 +346,13 @@ class CompareStub: public CodeStub {
  public:
   CompareStub(Condition cc,
               bool strict,
-              NaNInformation nan_info = kBothCouldBeNaN) :
-      cc_(cc), strict_(strict), never_nan_nan_(nan_info == kCantBothBeNaN) { }
+              NaNInformation nan_info = kBothCouldBeNaN,
+              bool include_number_compare = true) :
+      cc_(cc),
+      strict_(strict),
+      never_nan_nan_(nan_info == kCantBothBeNaN),
+      include_number_compare_(include_number_compare),
+      name_(NULL) { }

   void Generate(MacroAssembler* masm);
@@ -360,6 +365,16 @@ class CompareStub: public CodeStub {
   // generating the minor key for other comparisons to avoid creating more
   // stubs.
   bool never_nan_nan_;
+  // Do generate the number comparison code in the stub. Stubs without number
+  // comparison code is used when the number comparison has been inlined, and
+  // the stub will be called if one of the operands is not a number.
+  bool include_number_compare_;
+
+  // Encoding of the minor key CCCCCCCCCCCCCCNS.
+  class StrictField: public BitField<bool, 0, 1> {};
+  class NeverNanNanField: public BitField<bool, 1, 1> {};
+  class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
+  class ConditionField: public BitField<int, 3, 13> {};

   Major MajorKey() { return Compare; }
@@ -373,12 +388,16 @@ class CompareStub: public CodeStub {
   // Unfortunately you have to run without snapshots to see most of these
   // names in the profile since most compare stubs end up in the snapshot.
+  char* name_;
   const char* GetName();
 #ifdef DEBUG
   void Print() {
-    PrintF("CompareStub (cc %d), (strict %s)\n",
+    PrintF("CompareStub (cc %d), (strict %s), "
+           "(never_nan_nan %s), (number_compare %s)\n",
            static_cast<int>(cc_),
-           strict_ ? "true" : "false");
+           strict_ ? "true" : "false",
+           never_nan_nan_ ? "true" : "false",
+           include_number_compare_ ? "included" : "not included");
   }
 #endif
 };
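The four BitField subclasses pack the stub's parameters into the minor key by shift and mask. A self-contained sketch of how this style of packing works (this BitField is a stand-in written for illustration, not the helper from src/utils.h):

    #include <cassert>
    #include <stdint.h>

    // Each field owns `size` bits starting at bit `shift` of a 32-bit key.
    template <class T, int shift, int size>
    struct BitField {
      static uint32_t mask() { return ((1u << size) - 1u) << shift; }
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key & mask()) >> shift);
      }
    };

    // Same layout as the CompareStub minor key above: S at bit 0, N at
    // bit 1, the number-compare bit at 2, the condition in bits 3..15.
    typedef BitField<bool, 0, 1> StrictField;
    typedef BitField<bool, 1, 1> NeverNanNanField;
    typedef BitField<bool, 2, 1> IncludeNumberCompareField;
    typedef BitField<int, 3, 13> ConditionField;

    int main() {
      uint32_t key = ConditionField::encode(42)
                   | IncludeNumberCompareField::encode(true)
                   | StrictField::encode(true);
      assert(ConditionField::decode(key) == 42);
      assert(StrictField::decode(key));
      assert(!NeverNanNanField::decode(key));
      return 0;
    }

The new name_ field, initialized to NULL in the constructor, presumably lets GetName() build and cache the longer stub name lazily now that more parameters take part in it.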

136
deps/v8/src/compilation-cache.cc

@@ -28,6 +28,7 @@
 #include "v8.h"

 #include "compilation-cache.h"
+#include "serialize.h"

 namespace v8 {
 namespace internal {
@@ -101,18 +102,18 @@ class CompilationCacheScript : public CompilationSubCache {
   explicit CompilationCacheScript(int generations)
       : CompilationSubCache(generations) { }

-  Handle<JSFunction> Lookup(Handle<String> source,
-                            Handle<Object> name,
-                            int line_offset,
-                            int column_offset);
-  void Put(Handle<String> source, Handle<JSFunction> boilerplate);
+  Handle<SharedFunctionInfo> Lookup(Handle<String> source,
+                                    Handle<Object> name,
+                                    int line_offset,
+                                    int column_offset);
+  void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);

  private:
   // Note: Returns a new hash table if operation results in expansion.
-  Handle<CompilationCacheTable> TablePut(Handle<String> source,
-                                         Handle<JSFunction> boilerplate);
-  bool HasOrigin(Handle<JSFunction> boilerplate,
+  Handle<CompilationCacheTable> TablePut(
+      Handle<String> source, Handle<SharedFunctionInfo> function_info);
+  bool HasOrigin(Handle<SharedFunctionInfo> function_info,
                  Handle<Object> name,
                  int line_offset,
                  int column_offset);
@@ -127,17 +128,19 @@ class CompilationCacheEval: public CompilationSubCache {
   explicit CompilationCacheEval(int generations)
       : CompilationSubCache(generations) { }

-  Handle<JSFunction> Lookup(Handle<String> source, Handle<Context> context);
+  Handle<SharedFunctionInfo> Lookup(Handle<String> source,
+                                    Handle<Context> context);

   void Put(Handle<String> source,
            Handle<Context> context,
-           Handle<JSFunction> boilerplate);
+           Handle<SharedFunctionInfo> function_info);

  private:
   // Note: Returns a new hash table if operation results in expansion.
-  Handle<CompilationCacheTable> TablePut(Handle<String> source,
-                                         Handle<Context> context,
-                                         Handle<JSFunction> boilerplate);
+  Handle<CompilationCacheTable> TablePut(
+      Handle<String> source,
+      Handle<Context> context,
+      Handle<SharedFunctionInfo> function_info);

   DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
 };
@@ -225,12 +228,13 @@ void CompilationSubCache::Clear() {
 // We only re-use a cached function for some script source code if the
 // script originates from the same place. This is to avoid issues
 // when reporting errors, etc.
-bool CompilationCacheScript::HasOrigin(Handle<JSFunction> boilerplate,
-                                       Handle<Object> name,
-                                       int line_offset,
-                                       int column_offset) {
+bool CompilationCacheScript::HasOrigin(
+    Handle<SharedFunctionInfo> function_info,
+    Handle<Object> name,
+    int line_offset,
+    int column_offset) {
   Handle<Script> script =
-      Handle<Script>(Script::cast(boilerplate->shared()->script()));
+      Handle<Script>(Script::cast(function_info->script()));
   // If the script name isn't set, the boilerplate script should have
   // an undefined name to have the same origin.
   if (name.is_null()) {
@@ -250,10 +254,10 @@ bool CompilationCacheScript::HasOrigin(Handle<JSFunction> boilerplate,
 // be cached in the same script generation. Currently the first use
 // will be cached, but subsequent code from different source / line
 // won't.
-Handle<JSFunction> CompilationCacheScript::Lookup(Handle<String> source,
-                                                  Handle<Object> name,
-                                                  int line_offset,
-                                                  int column_offset) {
+Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
+                                                          Handle<Object> name,
+                                                          int line_offset,
+                                                          int column_offset) {
   Object* result = NULL;
   int generation;
@@ -263,12 +267,13 @@ Handle<JSFunction> CompilationCacheScript::Lookup(Handle<String> source,
   for (generation = 0; generation < generations(); generation++) {
     Handle<CompilationCacheTable> table = GetTable(generation);
     Handle<Object> probe(table->Lookup(*source));
-    if (probe->IsJSFunction()) {
-      Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(probe);
+    if (probe->IsSharedFunctionInfo()) {
+      Handle<SharedFunctionInfo> function_info =
+          Handle<SharedFunctionInfo>::cast(probe);
       // Break when we've found a suitable boilerplate function that
       // matches the origin.
-      if (HasOrigin(boilerplate, name, line_offset, column_offset)) {
-        result = *boilerplate;
+      if (HasOrigin(function_info, name, line_offset, column_offset)) {
+        result = *function_info;
         break;
       }
     }
@@ -290,38 +295,37 @@ Handle<JSFunction> CompilationCacheScript::Lookup(Handle<String> source,
   // to see if we actually found a cached script. If so, we return a
   // handle created in the caller's handle scope.
   if (result != NULL) {
-    Handle<JSFunction> boilerplate(JSFunction::cast(result));
-    ASSERT(HasOrigin(boilerplate, name, line_offset, column_offset));
+    Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
+    ASSERT(HasOrigin(shared, name, line_offset, column_offset));
     // If the script was found in a later generation, we promote it to
     // the first generation to let it survive longer in the cache.
-    if (generation != 0) Put(source, boilerplate);
+    if (generation != 0) Put(source, shared);
     Counters::compilation_cache_hits.Increment();
-    return boilerplate;
+    return shared;
   } else {
     Counters::compilation_cache_misses.Increment();
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
  }
 }


 Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
     Handle<String> source,
-    Handle<JSFunction> boilerplate) {
-  CALL_HEAP_FUNCTION(GetFirstTable()->Put(*source, *boilerplate),
+    Handle<SharedFunctionInfo> function_info) {
+  CALL_HEAP_FUNCTION(GetFirstTable()->Put(*source, *function_info),
                      CompilationCacheTable);
 }


 void CompilationCacheScript::Put(Handle<String> source,
-                                 Handle<JSFunction> boilerplate) {
+                                 Handle<SharedFunctionInfo> function_info) {
   HandleScope scope;
-  ASSERT(boilerplate->IsBoilerplate());
-  SetFirstTable(TablePut(source, boilerplate));
+  SetFirstTable(TablePut(source, function_info));
 }


-Handle<JSFunction> CompilationCacheEval::Lookup(Handle<String> source,
-                                                Handle<Context> context) {
+Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
+    Handle<String> source, Handle<Context> context) {
   // Make sure not to leak the table into the surrounding handle
   // scope. Otherwise, we risk keeping old tables around even after
   // having cleared the cache.
@@ -331,21 +335,22 @@ Handle<JSFunction> CompilationCacheEval::Lookup(Handle<String> source,
   for (generation = 0; generation < generations(); generation++) {
     Handle<CompilationCacheTable> table = GetTable(generation);
     result = table->LookupEval(*source, *context);
-    if (result->IsJSFunction()) {
+    if (result->IsSharedFunctionInfo()) {
       break;
     }
   }
 }
-  if (result->IsJSFunction()) {
-    Handle<JSFunction> boilerplate(JSFunction::cast(result));
+  if (result->IsSharedFunctionInfo()) {
+    Handle<SharedFunctionInfo>
+        function_info(SharedFunctionInfo::cast(result));
     if (generation != 0) {
-      Put(source, context, boilerplate);
+      Put(source, context, function_info);
     }
     Counters::compilation_cache_hits.Increment();
-    return boilerplate;
+    return function_info;
   } else {
     Counters::compilation_cache_misses.Increment();
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }
 }
@@ -353,18 +358,19 @@ Handle<JSFunction> CompilationCacheEval::Lookup(Handle<String> source,
 Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
     Handle<String> source,
     Handle<Context> context,
-    Handle<JSFunction> boilerplate) {
-  CALL_HEAP_FUNCTION(GetFirstTable()->PutEval(*source, *context, *boilerplate),
+    Handle<SharedFunctionInfo> function_info) {
+  CALL_HEAP_FUNCTION(GetFirstTable()->PutEval(*source,
+                                              *context,
+                                              *function_info),
                      CompilationCacheTable);
 }


 void CompilationCacheEval::Put(Handle<String> source,
                                Handle<Context> context,
-                               Handle<JSFunction> boilerplate) {
+                               Handle<SharedFunctionInfo> function_info) {
   HandleScope scope;
-  ASSERT(boilerplate->IsBoilerplate());
-  SetFirstTable(TablePut(source, context, boilerplate));
+  SetFirstTable(TablePut(source, context, function_info));
 }
@@ -415,26 +421,26 @@ void CompilationCacheRegExp::Put(Handle<String> source,
 }


-Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
-                                                  Handle<Object> name,
-                                                  int line_offset,
-                                                  int column_offset) {
+Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
+                                                          Handle<Object> name,
+                                                          int line_offset,
+                                                          int column_offset) {
   if (!IsEnabled()) {
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }

   return script.Lookup(source, name, line_offset, column_offset);
 }


-Handle<JSFunction> CompilationCache::LookupEval(Handle<String> source,
-                                                Handle<Context> context,
-                                                bool is_global) {
+Handle<SharedFunctionInfo> CompilationCache::LookupEval(Handle<String> source,
+                                                        Handle<Context> context,
+                                                        bool is_global) {
   if (!IsEnabled()) {
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }

-  Handle<JSFunction> result;
+  Handle<SharedFunctionInfo> result;
   if (is_global) {
     result = eval_global.Lookup(source, context);
   } else {
@@ -455,30 +461,28 @@ Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,

 void CompilationCache::PutScript(Handle<String> source,
-                                 Handle<JSFunction> boilerplate) {
+                                 Handle<SharedFunctionInfo> function_info) {
   if (!IsEnabled()) {
     return;
   }

-  ASSERT(boilerplate->IsBoilerplate());
-  script.Put(source, boilerplate);
+  script.Put(source, function_info);
 }


 void CompilationCache::PutEval(Handle<String> source,
                                Handle<Context> context,
                                bool is_global,
-                               Handle<JSFunction> boilerplate) {
+                               Handle<SharedFunctionInfo> function_info) {
   if (!IsEnabled()) {
     return;
   }

   HandleScope scope;
-  ASSERT(boilerplate->IsBoilerplate());
   if (is_global) {
-    eval_global.Put(source, context, boilerplate);
+    eval_global.Put(source, context, function_info);
   } else {
-    eval_contextual.Put(source, context, boilerplate);
+    eval_contextual.Put(source, context, function_info);
   }
 }
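The Lookup methods implement a small generational scheme: a hit found in an older generation is re-Put into the first table so it keeps surviving as the tables age. A toy version of the policy, assuming generations rotate the way CompilationSubCache's tables do (the names and value types here are illustrative only):

    #include <map>
    #include <string>
    #include <vector>

    class GenerationalCache {
     public:
      explicit GenerationalCache(size_t generations) : tables_(generations) {}

      // Drop the oldest table and start a fresh youngest one.
      void Age() {
        tables_.pop_back();
        tables_.insert(tables_.begin(), Table());
      }

      void Put(const std::string& key, int value) { tables_[0][key] = value; }

      bool Lookup(const std::string& key, int* value) {
        for (size_t g = 0; g < tables_.size(); ++g) {
          Table::iterator it = tables_[g].find(key);
          if (it == tables_[g].end()) continue;
          *value = it->second;
          if (g != 0) Put(key, *value);  // promote to the first generation
          return true;
        }
        return false;
      }

     private:
      typedef std::map<std::string, int> Table;
      std::vector<Table> tables_;
    };

Promoting the hit re-inserts it into the newest table, which is what keeps frequently used scripts alive across aging cycles.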

18
deps/v8/src/compilation-cache.h

@@ -40,17 +40,17 @@ class CompilationCache {
   // Finds the script function boilerplate for a source
   // string. Returns an empty handle if the cache doesn't contain a
   // script for the given source string with the right origin.
-  static Handle<JSFunction> LookupScript(Handle<String> source,
-                                         Handle<Object> name,
-                                         int line_offset,
-                                         int column_offset);
+  static Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
+                                                 Handle<Object> name,
+                                                 int line_offset,
+                                                 int column_offset);

   // Finds the function boilerplate for a source string for eval in a
   // given context. Returns an empty handle if the cache doesn't
   // contain a script for the given source string.
-  static Handle<JSFunction> LookupEval(Handle<String> source,
-                                       Handle<Context> context,
-                                       bool is_global);
+  static Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
+                                               Handle<Context> context,
+                                               bool is_global);

   // Returns the regexp data associated with the given regexp if it
   // is in cache, otherwise an empty handle.
@@ -60,14 +60,14 @@ class CompilationCache {
   // Associate the (source, kind) pair to the boilerplate. This may
   // overwrite an existing mapping.
   static void PutScript(Handle<String> source,
-                        Handle<JSFunction> boilerplate);
+                        Handle<SharedFunctionInfo> function_info);

   // Associate the (source, context->closure()->shared(), kind) triple
   // with the boilerplate. This may overwrite an existing mapping.
   static void PutEval(Handle<String> source,
                       Handle<Context> context,
                       bool is_global,
-                      Handle<JSFunction> boilerplate);
+                      Handle<SharedFunctionInfo> function_info);

   // Associate the (source, flags) pair to the given regexp data.
   // This may overwrite an existing mapping.

198
deps/v8/src/compiler.cc

@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -89,23 +89,33 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
   }

   if (FLAG_use_flow_graph) {
-    FlowGraphBuilder builder;
+    int variable_count =
+        function->num_parameters() + function->scope()->num_stack_slots();
+    FlowGraphBuilder builder(variable_count);
     builder.Build(function);

     if (!builder.HasStackOverflow()) {
-      int variable_count =
-          function->num_parameters() + function->scope()->num_stack_slots();
-      if (variable_count > 0 && builder.definitions()->length() > 0) {
+      if (variable_count > 0) {
         ReachingDefinitions rd(builder.postorder(),
-                               builder.definitions(),
+                               builder.body_definitions(),
                                variable_count);
         rd.Compute();
+
+        TypeAnalyzer ta(builder.postorder(),
+                        builder.body_definitions(),
+                        variable_count,
+                        function->num_parameters());
+        ta.Compute();
+
+        MarkLiveCode(builder.preorder(),
+                     builder.body_definitions(),
+                     variable_count);
       }
     }

 #ifdef DEBUG
     if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
-      builder.graph()->PrintText(builder.postorder());
+      builder.graph()->PrintText(function, builder.postorder());
     }
 #endif
   }
@@ -156,13 +166,13 @@ Handle<Code> MakeCodeForLiveEdit(CompilationInfo* info) {
 #endif


-static Handle<JSFunction> MakeFunction(bool is_global,
-                                       bool is_eval,
-                                       Compiler::ValidationState validate,
-                                       Handle<Script> script,
-                                       Handle<Context> context,
-                                       v8::Extension* extension,
-                                       ScriptDataImpl* pre_data) {
+static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
+    bool is_eval,
+    Compiler::ValidationState validate,
+    Handle<Script> script,
+    Handle<Context> context,
+    v8::Extension* extension,
+    ScriptDataImpl* pre_data) {
   CompilationZoneScope zone_scope(DELETE_ON_EXIT);
   PostponeInterruptsScope postpone;
@@ -204,7 +214,7 @@ static Handle<JSFunction> MakeFunction(bool is_global,
   // Check for parse errors.
   if (lit == NULL) {
     ASSERT(Top::has_pending_exception());
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }

   // Measure how long it takes to do the compilation; only take the
@@ -222,7 +232,7 @@ static Handle<JSFunction> MakeFunction(bool is_global,
   // Check for stack-overflow exceptions.
   if (code.is_null()) {
     Top::StackOverflow();
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }

 #if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
@@ -248,38 +258,39 @@ static Handle<JSFunction> MakeFunction(bool is_global,
 #endif

   // Allocate function.
-  Handle<JSFunction> fun =
-      Factory::NewFunctionBoilerplate(lit->name(),
-                                      lit->materialized_literal_count(),
-                                      code);
+  Handle<SharedFunctionInfo> result =
+      Factory::NewSharedFunctionInfo(lit->name(),
+                                     lit->materialized_literal_count(),
+                                     code);

   ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
-  Compiler::SetFunctionInfo(fun, lit, true, script);
+  Compiler::SetFunctionInfo(result, lit, true, script);

   // Hint to the runtime system used when allocating space for initial
   // property space by setting the expected number of properties for
   // the instances of the function.
-  SetExpectedNofPropertiesFromEstimate(fun, lit->expected_property_count());
+  SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());

 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Notify debugger
   Debugger::OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS);
 #endif

-  return fun;
+  return result;
 }


 static StaticResource<SafeStringInputBuffer> safe_string_input_buffer;


-Handle<JSFunction> Compiler::Compile(Handle<String> source,
-                                     Handle<Object> script_name,
-                                     int line_offset, int column_offset,
-                                     v8::Extension* extension,
-                                     ScriptDataImpl* input_pre_data,
-                                     Handle<Object> script_data,
-                                     NativesFlag natives) {
+Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
+                                             Handle<Object> script_name,
+                                             int line_offset,
+                                             int column_offset,
+                                             v8::Extension* extension,
+                                             ScriptDataImpl* input_pre_data,
+                                             Handle<Object> script_data,
+                                             NativesFlag natives) {
   int source_length = source->length();
   Counters::total_load_size.Increment(source_length);
   Counters::total_compile_size.Increment(source_length);
@@ -288,7 +299,7 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
   VMState state(COMPILER);

   // Do a lookup in the compilation cache but not for extensions.
-  Handle<JSFunction> result;
+  Handle<SharedFunctionInfo> result;
   if (extension == NULL) {
     result = CompilationCache::LookupScript(source,
                                             script_name,
@@ -320,13 +331,13 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
                                    : *script_data);

     // Compile the function and add it to the cache.
-    result = MakeFunction(true,
-                          false,
-                          DONT_VALIDATE_JSON,
-                          script,
-                          Handle<Context>::null(),
-                          extension,
-                          pre_data);
+    result = MakeFunctionInfo(true,
+                              false,
+                              DONT_VALIDATE_JSON,
+                              script,
+                              Handle<Context>::null(),
+                              extension,
+                              pre_data);
     if (extension == NULL && !result.is_null()) {
       CompilationCache::PutScript(source, result);
     }
@@ -342,10 +353,10 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
 }


-Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
-                                         Handle<Context> context,
-                                         bool is_global,
-                                         ValidationState validate) {
+Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
+                                                 Handle<Context> context,
+                                                 bool is_global,
+                                                 ValidationState validate) {
   // Note that if validation is required then no path through this
   // function is allowed to return a value without validating that
   // the input is legal json.
@@ -361,20 +372,20 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
   // invoke the compiler and add the result to the cache. If we're
   // evaluating json we bypass the cache since we can't be sure a
   // potential value in the cache has been validated.
-  Handle<JSFunction> result;
+  Handle<SharedFunctionInfo> result;
   if (validate == DONT_VALIDATE_JSON)
     result = CompilationCache::LookupEval(source, context, is_global);

   if (result.is_null()) {
     // Create a script object describing the script to be compiled.
     Handle<Script> script = Factory::NewScript(source);
-    result = MakeFunction(is_global,
-                          true,
-                          validate,
-                          script,
-                          context,
-                          NULL,
-                          NULL);
+    result = MakeFunctionInfo(is_global,
+                              true,
+                              validate,
+                              script,
+                              context,
+                              NULL,
+                              NULL);
     if (!result.is_null() && validate != VALIDATE_JSON) {
       // For json it's unlikely that we'll ever see exactly the same
       // string again so we don't use the compilation cache.
@@ -459,9 +470,9 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
 }


-Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
-                                              Handle<Script> script,
-                                              AstVisitor* caller) {
+Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
+                                                       Handle<Script> script,
+                                                       AstVisitor* caller) {
 #ifdef DEBUG
   // We should not try to compile the same function literal more than
   // once.
@@ -484,7 +495,7 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
   // The bodies of function literals have not yet been visited by
   // the AST optimizer/analyzer.
   if (!Rewriter::Optimize(literal)) {
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }

   if (literal->scope()->num_parameters() > 0 ||
@@ -492,28 +503,38 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
     AssignedVariablesAnalyzer ava(literal);
     ava.Analyze();
     if (ava.HasStackOverflow()) {
-      return Handle<JSFunction>::null();
+      return Handle<SharedFunctionInfo>::null();
     }
   }

   if (FLAG_use_flow_graph) {
-    FlowGraphBuilder builder;
-    builder.Build(literal);
-
-    if (!builder.HasStackOverflow()) {
-      int variable_count =
-          literal->num_parameters() + literal->scope()->num_stack_slots();
-      if (variable_count > 0 && builder.definitions()->length() > 0) {
-        ReachingDefinitions rd(builder.postorder(),
-                               builder.definitions(),
-                               variable_count);
-        rd.Compute();
+    int variable_count =
+        literal->num_parameters() + literal->scope()->num_stack_slots();
+    FlowGraphBuilder builder(variable_count);
+    builder.Build(literal);
+
+    if (!builder.HasStackOverflow()) {
+      if (variable_count > 0) {
+        ReachingDefinitions rd(builder.postorder(),
+                               builder.body_definitions(),
+                               variable_count);
+        rd.Compute();
+
+        TypeAnalyzer ta(builder.postorder(),
+                        builder.body_definitions(),
+                        variable_count,
+                        literal->num_parameters());
+        ta.Compute();
+
+        MarkLiveCode(builder.preorder(),
+                     builder.body_definitions(),
+                     variable_count);
       }
     }

 #ifdef DEBUG
     if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
-      builder.graph()->PrintText(builder.postorder());
+      builder.graph()->PrintText(literal, builder.postorder());
     }
 #endif
   }
@@ -553,7 +574,7 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
   // Check for stack-overflow exception.
   if (code.is_null()) {
     caller->SetStackOverflow();
-    return Handle<JSFunction>::null();
+    return Handle<SharedFunctionInfo>::null();
   }

   // Function compilation complete.
@@ -569,22 +590,17 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
   }

   // Create a boilerplate function.
-  Handle<JSFunction> function =
-      Factory::NewFunctionBoilerplate(literal->name(),
-                                      literal->materialized_literal_count(),
-                                      code);
-  SetFunctionInfo(function, literal, false, script);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Notify debugger that a new function has been added.
-  Debugger::OnNewFunction(function);
-#endif
+  Handle<SharedFunctionInfo> result =
+      Factory::NewSharedFunctionInfo(literal->name(),
+                                     literal->materialized_literal_count(),
+                                     code);
+  SetFunctionInfo(result, literal, false, script);

   // Set the expected number of properties for instances and return
   // the resulting function.
-  SetExpectedNofPropertiesFromEstimate(function,
+  SetExpectedNofPropertiesFromEstimate(result,
                                        literal->expected_property_count());
-  return function;
+  return result;
 }
@@ -592,23 +608,23 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
 // The start_position points to the first '(' character after the function name
 // in the full script source. When counting characters in the script source the
 // the first character is number 0 (not 1).
-void Compiler::SetFunctionInfo(Handle<JSFunction> fun,
+void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
                                FunctionLiteral* lit,
                                bool is_toplevel,
                                Handle<Script> script) {
-  fun->shared()->set_length(lit->num_parameters());
-  fun->shared()->set_formal_parameter_count(lit->num_parameters());
-  fun->shared()->set_script(*script);
-  fun->shared()->set_function_token_position(lit->function_token_position());
-  fun->shared()->set_start_position(lit->start_position());
-  fun->shared()->set_end_position(lit->end_position());
-  fun->shared()->set_is_expression(lit->is_expression());
-  fun->shared()->set_is_toplevel(is_toplevel);
-  fun->shared()->set_inferred_name(*lit->inferred_name());
-  fun->shared()->SetThisPropertyAssignmentsInfo(
+  function_info->set_length(lit->num_parameters());
+  function_info->set_formal_parameter_count(lit->num_parameters());
+  function_info->set_script(*script);
+  function_info->set_function_token_position(lit->function_token_position());
+  function_info->set_start_position(lit->start_position());
+  function_info->set_end_position(lit->end_position());
+  function_info->set_is_expression(lit->is_expression());
+  function_info->set_is_toplevel(is_toplevel);
+  function_info->set_inferred_name(*lit->inferred_name());
+  function_info->SetThisPropertyAssignmentsInfo(
      lit->has_only_simple_this_property_assignments(),
      *lit->this_property_assignments());
-  fun->shared()->set_try_full_codegen(lit->try_full_codegen());
+  function_info->set_try_full_codegen(lit->try_full_codegen());
 }

41
deps/v8/src/compiler.h

@@ -219,9 +219,9 @@ class CompilationInfo BASE_EMBEDDED {
 // functions, they will be compiled and allocated as part of the compilation
 // of the source code.

-// Please note this interface returns function boilerplates.
-// This means you need to call Factory::NewFunctionFromBoilerplate
-// before you have a real function with context.
+// Please note this interface returns shared function infos.
+// This means you need to call Factory::NewFunctionFromSharedFunctionInfo
+// before you have a real function with a context.

 class Compiler : public AllStatic {
  public:
@@ -232,34 +232,35 @@ class Compiler : public AllStatic {
   // the return handle contains NULL.

   // Compile a String source within a context.
-  static Handle<JSFunction> Compile(Handle<String> source,
-                                    Handle<Object> script_name,
-                                    int line_offset, int column_offset,
-                                    v8::Extension* extension,
-                                    ScriptDataImpl* pre_data,
-                                    Handle<Object> script_data,
-                                    NativesFlag is_natives_code);
+  static Handle<SharedFunctionInfo> Compile(Handle<String> source,
+                                            Handle<Object> script_name,
+                                            int line_offset,
+                                            int column_offset,
+                                            v8::Extension* extension,
+                                            ScriptDataImpl* pre_data,
+                                            Handle<Object> script_data,
+                                            NativesFlag is_natives_code);

   // Compile a String source within a context for Eval.
-  static Handle<JSFunction> CompileEval(Handle<String> source,
-                                        Handle<Context> context,
-                                        bool is_global,
-                                        ValidationState validation);
+  static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
+                                                Handle<Context> context,
+                                                bool is_global,
+                                                ValidationState validation);

   // Compile from function info (used for lazy compilation). Returns
   // true on success and false if the compilation resulted in a stack
   // overflow.
   static bool CompileLazy(CompilationInfo* info);

-  // Compile a function boilerplate object (the function is possibly
+  // Compile a shared function info object (the function is possibly
   // lazily compiled). Called recursively from a backend code
-  // generator 'caller' to build the boilerplate.
-  static Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node,
-                                             Handle<Script> script,
-                                             AstVisitor* caller);
+  // generator 'caller' to build the shared function info.
+  static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
+                                                      Handle<Script> script,
+                                                      AstVisitor* caller);

   // Set the function info for a newly compiled function.
-  static void SetFunctionInfo(Handle<JSFunction> fun,
+  static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
                               FunctionLiteral* lit,
                               bool is_toplevel,
                               Handle<Script> script);
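Per the comment above, compilation now yields a context-free SharedFunctionInfo, and a callable JSFunction only materializes once the shared info is bound to a context. A sketch of the two-step calling convention, with `source`, `script_name`, and `context` assumed in scope and error handling elided; the exact parameter list of Factory::NewFunctionFromSharedFunctionInfo is inferred from the comment, not shown in this diff:

    // Step 1: compile (or hit the compilation cache); no context needed.
    Handle<SharedFunctionInfo> shared =
        Compiler::Compile(source, script_name,
                          0, 0,                    // line and column offset
                          NULL,                    // extension
                          NULL,                    // pre-parse data
                          Handle<Object>::null(),  // script data
                          NOT_NATIVES_CODE);

    // Step 2: bind the shared info to a context to get a real closure.
    Handle<JSFunction> fun =
        Factory::NewFunctionFromSharedFunctionInfo(shared, context);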

2
deps/v8/src/contexts.h

@@ -86,7 +86,6 @@ enum ContextLookupFlags {
   V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
   V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
     call_as_constructor_delegate) \
-  V(EMPTY_SCRIPT_INDEX, Script, empty_script) \
   V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
   V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
   V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
@@ -207,7 +206,6 @@ class Context: public FixedArray {
     RUNTIME_CONTEXT_INDEX,
     CALL_AS_FUNCTION_DELEGATE_INDEX,
     CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,
-    EMPTY_SCRIPT_INDEX,
     SCRIPT_FUNCTION_INDEX,
     OPAQUE_REFERENCE_FUNCTION_INDEX,
     CONTEXT_EXTENSION_FUNCTION_INDEX,

18
deps/v8/src/conversions-inl.h

@@ -41,24 +41,6 @@
 namespace v8 {
 namespace internal {

-// The fast double-to-int conversion routine does not guarantee
-// rounding towards zero.
-static inline int FastD2I(double x) {
-#ifdef __USE_ISOC99
-  // The ISO C99 standard defines the lrint() function which rounds a
-  // double to an integer according to the current rounding direction.
-  return lrint(x);
-#else
-  // This is incredibly slow on Intel x86. The reason is that rounding
-  // towards zero is implied by the C standard. This means that the
-  // status register of the FPU has to be changed with the 'fldcw'
-  // instruction. This completely stalls the pipeline and takes many
-  // hundreds of clock cycles.
-  return static_cast<int>(x);
-#endif
-}
-
-
 // The fast double-to-unsigned-int conversion routine does not guarantee
 // rounding towards zero, or any reasonable value if the argument is larger
 // than what fits in an unsigned 32-bit integer.

7
deps/v8/src/conversions.h

@@ -36,7 +36,12 @@ namespace internal {
 // rounding towards zero.
 // The result is unspecified if x is infinite or NaN, or if the rounded
 // integer value is outside the range of type int.
-static inline int FastD2I(double x);
+static inline int FastD2I(double x) {
+  // The static_cast convertion from double to int used to be slow, but
+  // as new benchmarks show, now it is much faster than lrint().
+  return static_cast<int>(x);
+}

 static inline unsigned int FastD2UI(double x);
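The replaced lrint() variant and the static_cast version differ in more than speed, they can return different values: static_cast truncates toward zero, while lrint() follows the current rounding mode (round-to-nearest-even by default). A quick illustration:

    #include <cassert>
    #include <cmath>

    int main() {
      assert(static_cast<int>(1.7) == 1);    // truncation toward zero
      assert(static_cast<int>(-1.7) == -1);
      assert(lrint(1.7) == 2);               // default mode: round to nearest
      assert(lrint(-1.7) == -2);
      return 0;
    }

Both behaviors satisfy the documented contract, since FastD2I explicitly does not guarantee rounding toward zero.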

2
deps/v8/src/cpu-profiler.cc

@@ -176,7 +176,6 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {

 void ProfilerEventsProcessor::Run() {
-  ticks_buffer_.SetUpConsumer();
   unsigned dequeue_order = 0;
   running_ = true;

@@ -194,7 +193,6 @@ void ProfilerEventsProcessor::Run() {
   ticks_buffer_.FlushResidualRecords();
   // Perform processing until we have tick events, skip remaining code events.
   while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
-  ticks_buffer_.TearDownConsumer();
 }

3
deps/v8/src/cpu-profiler.h

@@ -154,14 +154,11 @@ class ProfilerEventsProcessor : public Thread {
   void FunctionMoveEvent(Address from, Address to);
   void FunctionDeleteEvent(Address from);

-  // Tick sampler registration. Called by sampler thread or signal handler.
-  inline void SetUpSamplesProducer() { ticks_buffer_.SetUpProducer(); }
   // Tick sample events are filled directly in the buffer of the circular
   // queue (because the structure is of fixed width, but usually not all
   // stack frame entries are filled.) This method returns a pointer to the
   // next record of the buffer.
   INLINE(TickSample* TickSampleEvent());
-  inline void TearDownSamplesProducer() { ticks_buffer_.TearDownProducer(); }

  private:
   union CodeEventsContainer {

9
deps/v8/src/d8.cc

@@ -467,9 +467,12 @@ void Shell::Initialize() {

   // Mark the d8 shell script as native to avoid it showing up as normal source
   // in the debugger.
-  i::Handle<i::JSFunction> script_fun = Utils::OpenHandle(*script);
-  i::Handle<i::Script> script_object =
-      i::Handle<i::Script>(i::Script::cast(script_fun->shared()->script()));
+  i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
+  i::Handle<i::Script> script_object = compiled_script->IsJSFunction()
+      ? i::Handle<i::Script>(i::Script::cast(
+          i::JSFunction::cast(*compiled_script)->shared()->script()))
+      : i::Handle<i::Script>(i::Script::cast(
+          i::SharedFunctionInfo::cast(*compiled_script)->script()));
   script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));

   // Create the evaluation context

289
deps/v8/src/data-flow.cc

@@ -195,6 +195,81 @@ void FlowGraphBuilder::Build(FunctionLiteral* lit) {
 }


+// This function peels off one iteration of a for-loop. The return value
+// is either a block statement containing the peeled loop or NULL in case
+// there is a stack overflow.
+static Statement* PeelForLoop(ForStatement* stmt) {
+  // Mark this for-statement as processed.
+  stmt->set_peel_this_loop(false);
+
+  // Create new block containing the init statement of the for-loop and
+  // an if-statement containing the peeled iteration and the original
+  // loop without the init-statement.
+  Block* block = new Block(NULL, 2, false);
+  if (stmt->init() != NULL) {
+    Statement* init = stmt->init();
+    // The init statement gets the statement position of the for-loop
+    // to make debugging of peeled loops possible.
+    init->set_statement_pos(stmt->statement_pos());
+    block->AddStatement(init);
+  }
+
+  // Copy the condition.
+  CopyAstVisitor copy_visitor;
+  Expression* cond_copy = stmt->cond() != NULL
+      ? copy_visitor.DeepCopyExpr(stmt->cond())
+      : new Literal(Factory::true_value());
+  if (copy_visitor.HasStackOverflow()) return NULL;
+
+  // Construct a block with the peeled body and the rest of the for-loop.
+  Statement* body_copy = copy_visitor.DeepCopyStmt(stmt->body());
+  if (copy_visitor.HasStackOverflow()) return NULL;
+
+  Statement* next_copy = stmt->next() != NULL
+      ? copy_visitor.DeepCopyStmt(stmt->next())
+      : new EmptyStatement();
+  if (copy_visitor.HasStackOverflow()) return NULL;
+
+  Block* peeled_body = new Block(NULL, 3, false);
+  peeled_body->AddStatement(body_copy);
+  peeled_body->AddStatement(next_copy);
+  peeled_body->AddStatement(stmt);
+
+  // Remove the duplicated init statement from the for-statement.
+  stmt->set_init(NULL);
+
+  // Create new test at the top and add it to the newly created block.
+  IfStatement* test = new IfStatement(cond_copy,
+                                      peeled_body,
+                                      new EmptyStatement());
+  block->AddStatement(test);
+  return block;
+}
+
+
+void FlowGraphBuilder::VisitStatements(ZoneList<Statement*>* stmts) {
+  for (int i = 0, len = stmts->length(); i < len; i++) {
+    stmts->at(i) = ProcessStatement(stmts->at(i));
+  }
+}
+
+
+Statement* FlowGraphBuilder::ProcessStatement(Statement* stmt) {
+  if (FLAG_loop_peeling &&
+      stmt->AsForStatement() != NULL &&
+      stmt->AsForStatement()->peel_this_loop()) {
+    Statement* tmp_stmt = PeelForLoop(stmt->AsForStatement());
+    if (tmp_stmt == NULL) {
+      SetStackOverflow();
+    } else {
+      stmt = tmp_stmt;
+    }
+  }
+  Visit(stmt);
+  return stmt;
+}
+
+
 void FlowGraphBuilder::VisitDeclaration(Declaration* decl) {
   UNREACHABLE();
 }
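What PeelForLoop constructs is easiest to see on a concrete loop. A runnable C++ analogue of the AST rewrite (the real transformation operates on JavaScript ASTs, but the shape is identical):

    #include <cstdio>

    int main() {
      // Original: for (init; cond; next) body
      for (int i = 0; i < 3; i++) printf("%d\n", i);

      // After peeling one iteration, mirroring the block built above:
      // { init; if (cond) { body; next; for (; cond; next) body } }
      {
        int i = 0;              // init, hoisted into the new outer block
        if (i < 3) {            // copied condition guards the peeled pass
          printf("%d\n", i);    // peeled copy of the body
          i++;                  // peeled copy of the next-expression
          for (; i < 3; i++) {  // the original loop, its init removed
            printf("%d\n", i);
          }
        }
      }
      return 0;
    }

Peeling one iteration this way gives later passes a straight-line first pass through the body, at the cost of duplicating the body and condition once.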
@@ -221,11 +296,11 @@ void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) {
   BranchNode* branch = new BranchNode();
   FlowGraph original = graph_;
   graph_ = FlowGraph::Empty();
-  Visit(stmt->then_statement());
+  stmt->set_then_statement(ProcessStatement(stmt->then_statement()));

   FlowGraph left = graph_;
   graph_ = FlowGraph::Empty();
-  Visit(stmt->else_statement());
+  stmt->set_else_statement(ProcessStatement(stmt->else_statement()));

   if (HasStackOverflow()) return;
   JoinNode* join = new JoinNode();
@@ -275,7 +350,7 @@ void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {

 void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
-  if (stmt->init() != NULL) Visit(stmt->init());
+  if (stmt->init() != NULL) stmt->set_init(ProcessStatement(stmt->init()));

   JoinNode* join = new JoinNode();
   FlowGraph original = graph_;
@@ -285,9 +360,9 @@ void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
   BranchNode* branch = new BranchNode();
   FlowGraph condition = graph_;
   graph_ = FlowGraph::Empty();
-  Visit(stmt->body());
+  stmt->set_body(ProcessStatement(stmt->body()));

-  if (stmt->next() != NULL) Visit(stmt->next());
+  if (stmt->next() != NULL) stmt->set_next(ProcessStatement(stmt->next()));

   if (HasStackOverflow()) return;
   original.Loop(join, &condition, branch, &graph_);
@@ -320,8 +395,8 @@ void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
 }


-void FlowGraphBuilder::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void FlowGraphBuilder::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   SetStackOverflow();
 }
@@ -376,8 +451,10 @@ void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
     if (expr->is_compound()) Visit(expr->target());
     Visit(expr->value());
     if (var->IsStackAllocated()) {
-      expr->set_num(definitions_.length());
-      definitions_.Add(expr);
+      // The first definition in the body is numbered n, where n is the
+      // number of parameters and stack-allocated locals.
+      expr->set_num(body_definitions_.length() + variable_count_);
+      body_definitions_.Add(expr);
     }

   } else if (prop != NULL) {
@@ -454,8 +531,10 @@ void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) {
   Visit(expr->expression());
   Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
   if (var != NULL && var->IsStackAllocated()) {
-    expr->set_num(definitions_.length());
-    definitions_.Add(expr);
+    // The first definition in the body is numbered n, where n is the number
+    // of parameters and stack-allocated locals.
+    expr->set_num(body_definitions_.length() + variable_count_);
+    body_definitions_.Add(expr);
   }

   if (HasStackOverflow()) return;
@@ -638,8 +717,8 @@ void AstLabeler::VisitFunctionLiteral(FunctionLiteral* expr) {
 }


-void AstLabeler::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void AstLabeler::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   UNREACHABLE();
 }
@@ -1015,8 +1094,8 @@ void AssignedVariablesAnalyzer::VisitFunctionLiteral(FunctionLiteral* expr) {
 }


-void AssignedVariablesAnalyzer::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void AssignedVariablesAnalyzer::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   // Nothing to do.
   ASSERT(av_.IsEmpty());
 }
@@ -1342,9 +1421,9 @@ void TextInstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) {
 }


-void TextInstructionPrinter::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  PrintF("FunctionBoilerplateLiteral");
+void TextInstructionPrinter::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  PrintF("SharedFunctionInfoLiteral");
 }
@@ -1584,9 +1663,16 @@ void BlockNode::PrintText() {
   PrintF("L%d: Block\n", number());
   TextInstructionPrinter printer;
   for (int i = 0, len = instructions_.length(); i < len; i++) {
+    AstNode* instr = instructions_[i];
+    // Print a star next to dead instructions.
+    if (instr->AsExpression() != NULL && instr->AsExpression()->is_live()) {
+      PrintF("  ");
+    } else {
+      PrintF("* ");
+    }
     PrintF("%d ", printer.NextNumber());
-    printer.Visit(instructions_[i]);
-    printer.AssignNumber(instructions_[i]);
+    printer.Visit(instr);
+    printer.AssignNumber(instr);
     PrintF("\n");
   }
   PrintF("goto L%d\n\n", successor_->number());
@@ -1611,8 +1697,9 @@ void JoinNode::PrintText() {
 }


-void FlowGraph::PrintText(ZoneList<Node*>* postorder) {
+void FlowGraph::PrintText(FunctionLiteral* fun, ZoneList<Node*>* postorder) {
   PrintF("\n========\n");
+  PrintF("name = %s\n", *fun->name()->ToCString());

   // Number nodes and instructions in reverse postorder.
   node_count = 0;
@@ -1664,11 +1751,16 @@ void BlockNode::InitializeReachingDefinitions(int definition_count,
   int variable_count = variables->length();

   rd_.Initialize(definition_count);
+  // The RD_in set for the entry node has a definition for each parameter
+  // and local.
+  if (predecessor_ == NULL) {
+    for (int i = 0; i < variable_count; i++) rd_.rd_in()->Add(i);
+  }
+
   for (int i = 0; i < instruction_count; i++) {
     Expression* expr = instructions_[i]->AsExpression();
     if (expr == NULL) continue;
-    Variable* var = expr->AssignedVar();
+    Variable* var = expr->AssignedVariable();
     if (var == NULL || !var->IsStackAllocated()) continue;

     // All definitions of this variable are killed.
@@ -1845,7 +1937,7 @@ void BlockNode::PropagateReachingDefinitions(List<BitVector*>* variables) {
     // It may instead (or also) be a definition. If so update the running
     // value of reaching definitions for the block.
-    Variable* var = expr->AssignedVar();
+    Variable* var = expr->AssignedVariable();
     if (var == NULL || !var->IsStackAllocated()) continue;

     // All definitions of this variable are killed.
@@ -1859,40 +1951,25 @@
 void ReachingDefinitions::Compute() {
-  ASSERT(!definitions_->is_empty());
-
-  int variable_count = variables_.length();
-  int definition_count = definitions_->length();
+  // The definitions in the body plus an implicit definition for each
+  // variable at function entry.
+  int definition_count = body_definitions_->length() + variable_count_;
   int node_count = postorder_->length();

-  // Step 1: For each variable, identify the set of all its definitions in
-  // the body.
-  for (int i = 0; i < definition_count; i++) {
-    Variable* var = definitions_->at(i)->AssignedVar();
-    variables_[IndexFor(var, variable_count)]->Add(i);
-  }
-
-  if (FLAG_print_graph_text) {
-    for (int i = 0; i < variable_count; i++) {
-      BitVector* def_set = variables_[i];
-      if (!def_set->IsEmpty()) {
-        // At least one definition.
-        bool first = true;
-        for (int j = 0; j < definition_count; j++) {
-          if (def_set->Contains(j)) {
-            if (first) {
-              Variable* var = definitions_->at(j)->AssignedVar();
-              ASSERT(var != NULL);
-              PrintF("Def[%s] = {%d", *var->name()->ToCString(), j);
-              first = false;
-            } else {
-              PrintF(",%d", j);
-            }
-          }
-        }
-        PrintF("}\n");
-      }
-    }
+  // Step 1: For each stack-allocated variable, identify the set of all its
+  // definitions.
+  List<BitVector*> variables;
+  for (int i = 0; i < variable_count_; i++) {
+    // Add the initial definition for each variable.
+    BitVector* initial = new BitVector(definition_count);
+    initial->Add(i);
+    variables.Add(initial);
+  }
+  for (int i = 0, len = body_definitions_->length(); i < len; i++) {
+    // Account for each definition in the body as a definition of the
+    // defined variable.
+    Variable* var = body_definitions_->at(i)->AssignedVariable();
+    variables[IndexFor(var, variable_count_)]->Add(i + variable_count_);
   }

   // Step 2: Compute KILL and GEN for each block node, initialize RD_in for
@@ -1902,7 +1979,7 @@ void ReachingDefinitions::Compute() {
   WorkList<Node> worklist(node_count);
   for (int i = node_count - 1; i >= 0; i--) {
     postorder_->at(i)->InitializeReachingDefinitions(definition_count,
-                                                     &variables_,
+                                                     &variables,
                                                      &worklist,
                                                      mark);
   }
@@ -1919,7 +1996,105 @@ void ReachingDefinitions::Compute() {
   // Step 4: Based on RD_in for block nodes, propagate reaching definitions
   // to all variable uses in the block.
   for (int i = 0; i < node_count; i++) {
-    postorder_->at(i)->PropagateReachingDefinitions(&variables_);
+    postorder_->at(i)->PropagateReachingDefinitions(&variables);
   }
 }
bool TypeAnalyzer::IsPrimitiveDef(int def_num) {
if (def_num < param_count_) return false;
if (def_num < variable_count_) return true;
return body_definitions_->at(def_num - variable_count_)->IsPrimitive();
}
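The definition numbering that IsPrimitiveDef relies on is implicit in the hunks above: numbers 0 .. param_count_-1 are the implicit entry definitions of parameters (caller-controlled, hence non-primitive), param_count_ .. variable_count_-1 are the implicit entry definitions of locals (undefined, hence primitive), and everything from variable_count_ up indexes into body_definitions_. A minimal standalone sketch of that convention; the function shape and example values here are illustrative, not from the commit:

#include <cassert>
#include <vector>

// Hypothetical mirror of TypeAnalyzer's numbering: for a function with
// param_count parameters and variable_count stack-allocated variables,
// definition numbers are laid out as
//   [0, param_count)              - implicit entry defs of parameters
//   [param_count, variable_count) - implicit entry defs of locals
//   [variable_count, ...)         - explicit definitions in the body
bool IsPrimitiveDef(int def_num, int param_count, int variable_count,
                    const std::vector<bool>& body_def_is_primitive) {
  if (def_num < param_count) return false;    // parameter: unknown type
  if (def_num < variable_count) return true;  // local: starts out undefined
  return body_def_is_primitive[def_num - variable_count];
}

int main() {
  // function f(a) { var x; x = 1; x = a; } -- one parameter, two
  // variables, two body definitions (x = 1 is primitive, x = a is not).
  std::vector<bool> body_defs;
  body_defs.push_back(true);   // x = 1
  body_defs.push_back(false);  // x = a
  assert(!IsPrimitiveDef(0, 1, 2, body_defs));  // entry def of a
  assert(IsPrimitiveDef(1, 1, 2, body_defs));   // entry def of x
  assert(IsPrimitiveDef(2, 1, 2, body_defs));   // x = 1
  assert(!IsPrimitiveDef(3, 1, 2, body_defs));  // x = a
  return 0;
}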
+void TypeAnalyzer::Compute() {
+  bool changed;
+  int count = 0;
+  do {
+    changed = false;
+    if (FLAG_print_graph_text) {
+      PrintF("TypeAnalyzer::Compute - iteration %d\n", count++);
+    }
+    for (int i = postorder_->length() - 1; i >= 0; --i) {
+      Node* node = postorder_->at(i);
+      if (node->IsBlockNode()) {
+        BlockNode* block = BlockNode::cast(node);
+        for (int j = 0; j < block->instructions()->length(); j++) {
+          Expression* expr = block->instructions()->at(j)->AsExpression();
+          if (expr != NULL) {
+            // For variable uses: Compute new type from reaching definitions.
+            VariableProxy* proxy = expr->AsVariableProxy();
+            if (proxy != NULL && proxy->reaching_definitions() != NULL) {
+              BitVector* rd = proxy->reaching_definitions();
+              bool prim_type = true;
+              // TODO(fsc): A sparse set representation of reaching
+              // definitions would speed up iterating here.
+              for (int k = 0; k < rd->length(); k++) {
+                if (rd->Contains(k) && !IsPrimitiveDef(k)) {
+                  prim_type = false;
+                  break;
+                }
+              }
+              // Reset changed flag if new type information was computed.
+              if (prim_type != proxy->IsPrimitive()) {
+                changed = true;
+                proxy->SetIsPrimitive(prim_type);
+              }
+            }
+          }
+        }
+      }
+    }
+  } while (changed);
+}
+
+
+void Node::MarkCriticalInstructions(
+    List<AstNode*>* stack,
+    ZoneList<Expression*>* body_definitions,
+    int variable_count) {
+}
+
+
+void BlockNode::MarkCriticalInstructions(
+    List<AstNode*>* stack,
+    ZoneList<Expression*>* body_definitions,
+    int variable_count) {
+  for (int i = instructions_.length() - 1; i >= 0; i--) {
+    // Only expressions can appear in the flow graph for now.
+    Expression* expr = instructions_[i]->AsExpression();
+    if (expr != NULL && !expr->is_live() &&
+        (expr->is_loop_condition() || expr->IsCritical())) {
+      expr->mark_as_live();
+      expr->ProcessNonLiveChildren(stack, body_definitions, variable_count);
+    }
+  }
+}
+
+
+void MarkLiveCode(ZoneList<Node*>* nodes,
+                  ZoneList<Expression*>* body_definitions,
+                  int variable_count) {
+  List<AstNode*> stack(20);
+
+  // Mark the critical AST nodes as live; mark their dependencies and
+  // add them to the marking stack.
+  for (int i = nodes->length() - 1; i >= 0; i--) {
+    nodes->at(i)->MarkCriticalInstructions(&stack, body_definitions,
+                                           variable_count);
+  }
+
+  // Continue marking dependencies until no more.
+  while (!stack.is_empty()) {
+    // Only expressions can appear in the flow graph for now.
+    Expression* expr = stack.RemoveLast()->AsExpression();
+    if (expr != NULL) {
+      expr->ProcessNonLiveChildren(&stack, body_definitions, variable_count);
+    }
+  }
+}
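The MarkLiveCode pass above is a standard backward mark phase: seed the mark from instructions with observable effects (critical expressions and loop conditions), then drain a stack, marking the operands of anything already marked; whatever is never marked is dead. A reduced sketch of the same pattern over a toy instruction type; the Instr struct and the example program are illustrative, not V8's:

#include <cstdio>
#include <vector>

// Toy instruction: a liveness flag plus the indices of instructions
// whose results it consumes.
struct Instr {
  bool critical;              // has an observable effect
  bool live;
  std::vector<int> operands;  // instructions this one depends on
};

// Seed the mark from critical instructions, then drain the stack,
// marking operands of everything already marked -- the same shape as
// MarkLiveCode/MarkCriticalInstructions above.
void MarkLive(std::vector<Instr>* code) {
  std::vector<int> stack;
  for (int i = static_cast<int>(code->size()) - 1; i >= 0; i--) {
    if ((*code)[i].critical) {
      (*code)[i].live = true;
      stack.push_back(i);
    }
  }
  while (!stack.empty()) {
    int n = stack.back();
    stack.pop_back();
    for (size_t j = 0; j < (*code)[n].operands.size(); ++j) {
      int op = (*code)[n].operands[j];
      if (!(*code)[op].live) {
        (*code)[op].live = true;
        stack.push_back(op);
      }
    }
  }
}

int main() {
  // 0: x = 1 (dead), 1: y = 2, 2: print(y) (critical, uses instruction 1).
  std::vector<Instr> code(3);
  code[0].critical = false; code[0].live = false;
  code[1].critical = false; code[1].live = false;
  code[2].critical = true;  code[2].live = false;
  code[2].operands.push_back(1);
  MarkLive(&code);
  // Prints "0:0 1:1 2:1": instruction 0 is never marked, so a dead-code
  // pass may eliminate it.
  std::printf("0:%d 1:%d 2:%d\n", code[0].live, code[1].live, code[2].live);
  return 0;
}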

168  deps/v8/src/data-flow.h
@@ -241,6 +241,12 @@ class Node: public ZoneObject {
   virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark) = 0;
   virtual void PropagateReachingDefinitions(List<BitVector*>* variables);

+  // Functions used by dead-code elimination.
+  virtual void MarkCriticalInstructions(
+      List<AstNode*>* stack,
+      ZoneList<Expression*>* body_definitions,
+      int variable_count);
+
 #ifdef DEBUG
   void AssignNodeNumber();
   void PrintReachingDefinitions();

@@ -263,24 +269,24 @@ class ExitNode: public Node {
  public:
   ExitNode() : predecessors_(4) {}

-  bool IsExitNode() { return true; }
+  virtual bool IsExitNode() { return true; }

-  void AddPredecessor(Node* predecessor) {
+  virtual void AddPredecessor(Node* predecessor) {
     ASSERT(predecessor != NULL);
     predecessors_.Add(predecessor);
   }

-  void AddSuccessor(Node* successor) { UNREACHABLE(); }
+  virtual void AddSuccessor(Node* successor) { UNREACHABLE(); }

-  void Traverse(bool mark,
-                ZoneList<Node*>* preorder,
-                ZoneList<Node*>* postorder);
+  virtual void Traverse(bool mark,
+                        ZoneList<Node*>* preorder,
+                        ZoneList<Node*>* postorder);

-  void ComputeRDOut(BitVector* result);
-  void UpdateRDIn(WorkList<Node>* worklist, bool mark);
+  virtual void ComputeRDOut(BitVector* result);
+  virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);

 #ifdef DEBUG
-  void PrintText();
+  virtual void PrintText();
 #endif

  private:

@@ -301,16 +307,18 @@ class BlockNode: public Node {
     return reinterpret_cast<BlockNode*>(node);
   }

-  bool IsBlockNode() { return true; }
+  virtual bool IsBlockNode() { return true; }

   bool is_empty() { return instructions_.is_empty(); }

-  void AddPredecessor(Node* predecessor) {
+  ZoneList<AstNode*>* instructions() { return &instructions_; }
+
+  virtual void AddPredecessor(Node* predecessor) {
     ASSERT(predecessor_ == NULL && predecessor != NULL);
     predecessor_ = predecessor;
   }

-  void AddSuccessor(Node* successor) {
+  virtual void AddSuccessor(Node* successor) {
     ASSERT(successor_ == NULL && successor != NULL);
     successor_ = successor;
   }

@@ -319,20 +327,25 @@ class BlockNode: public Node {
     instructions_.Add(instruction);
   }

-  void Traverse(bool mark,
-                ZoneList<Node*>* preorder,
-                ZoneList<Node*>* postorder);
+  virtual void Traverse(bool mark,
+                        ZoneList<Node*>* preorder,
+                        ZoneList<Node*>* postorder);

-  void InitializeReachingDefinitions(int definition_count,
-                                     List<BitVector*>* variables,
-                                     WorkList<Node>* worklist,
-                                     bool mark);
-  void ComputeRDOut(BitVector* result);
-  void UpdateRDIn(WorkList<Node>* worklist, bool mark);
-  void PropagateReachingDefinitions(List<BitVector*>* variables);
+  virtual void InitializeReachingDefinitions(int definition_count,
+                                             List<BitVector*>* variables,
+                                             WorkList<Node>* worklist,
+                                             bool mark);
+  virtual void ComputeRDOut(BitVector* result);
+  virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
+  virtual void PropagateReachingDefinitions(List<BitVector*>* variables);
+
+  virtual void MarkCriticalInstructions(
+      List<AstNode*>* stack,
+      ZoneList<Expression*>* body_definitions,
+      int variable_count);

 #ifdef DEBUG
-  void PrintText();
+  virtual void PrintText();
 #endif

  private:

@@ -349,14 +362,14 @@ class BranchNode: public Node {
  public:
   BranchNode() : predecessor_(NULL), successor0_(NULL), successor1_(NULL) {}

-  bool IsBranchNode() { return true; }
+  virtual bool IsBranchNode() { return true; }

-  void AddPredecessor(Node* predecessor) {
+  virtual void AddPredecessor(Node* predecessor) {
     ASSERT(predecessor_ == NULL && predecessor != NULL);
     predecessor_ = predecessor;
   }

-  void AddSuccessor(Node* successor) {
+  virtual void AddSuccessor(Node* successor) {
     ASSERT(successor1_ == NULL && successor != NULL);
     if (successor0_ == NULL) {
       successor0_ = successor;

@@ -365,15 +378,15 @@ class BranchNode: public Node {
     }
   }

-  void Traverse(bool mark,
-                ZoneList<Node*>* preorder,
-                ZoneList<Node*>* postorder);
+  virtual void Traverse(bool mark,
+                        ZoneList<Node*>* preorder,
+                        ZoneList<Node*>* postorder);

-  void ComputeRDOut(BitVector* result);
-  void UpdateRDIn(WorkList<Node>* worklist, bool mark);
+  virtual void ComputeRDOut(BitVector* result);
+  virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);

 #ifdef DEBUG
-  void PrintText();
+  virtual void PrintText();
 #endif

  private:

@@ -395,27 +408,27 @@ class JoinNode: public Node {
     return reinterpret_cast<JoinNode*>(node);
   }

-  bool IsJoinNode() { return true; }
+  virtual bool IsJoinNode() { return true; }

-  void AddPredecessor(Node* predecessor) {
+  virtual void AddPredecessor(Node* predecessor) {
     ASSERT(predecessor != NULL);
     predecessors_.Add(predecessor);
   }

-  void AddSuccessor(Node* successor) {
+  virtual void AddSuccessor(Node* successor) {
     ASSERT(successor_ == NULL && successor != NULL);
     successor_ = successor;
   }

-  void Traverse(bool mark,
-                ZoneList<Node*>* preorder,
-                ZoneList<Node*>* postorder);
+  virtual void Traverse(bool mark,
+                        ZoneList<Node*>* preorder,
+                        ZoneList<Node*>* postorder);

-  void ComputeRDOut(BitVector* result);
-  void UpdateRDIn(WorkList<Node>* worklist, bool mark);
+  virtual void ComputeRDOut(BitVector* result);
+  virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);

 #ifdef DEBUG
-  void PrintText();
+  virtual void PrintText();
 #endif

  private:

@@ -470,7 +483,7 @@ class FlowGraph BASE_EMBEDDED {
                   FlowGraph* body);

 #ifdef DEBUG
-  void PrintText(ZoneList<Node*>* postorder);
+  void PrintText(FunctionLiteral* fun, ZoneList<Node*>* postorder);
 #endif

  private:

@@ -485,23 +498,29 @@ class FlowGraph BASE_EMBEDDED {
 // traversal orders as a byproduct.
 class FlowGraphBuilder: public AstVisitor {
  public:
-  FlowGraphBuilder()
+  explicit FlowGraphBuilder(int variable_count)
       : graph_(FlowGraph::Empty()),
         global_exit_(NULL),
         preorder_(4),
         postorder_(4),
-        definitions_(4) {
+        variable_count_(variable_count),
+        body_definitions_(4) {
   }

   void Build(FunctionLiteral* lit);

   FlowGraph* graph() { return &graph_; }
+  ZoneList<Node*>* preorder() { return &preorder_; }
   ZoneList<Node*>* postorder() { return &postorder_; }
-  ZoneList<Expression*>* definitions() { return &definitions_; }
+  ZoneList<Expression*>* body_definitions() { return &body_definitions_; }

  private:
   ExitNode* global_exit() { return global_exit_; }

+  // Helpers to allow tranforming the ast during flow graph construction.
+  void VisitStatements(ZoneList<Statement*>* stmts);
+  Statement* ProcessStatement(Statement* stmt);
+
   // AST node visit functions.
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)

@@ -512,11 +531,13 @@ class FlowGraphBuilder: public AstVisitor {
   ZoneList<Node*> preorder_;
   ZoneList<Node*> postorder_;

-  // The flow graph builder collects a list of definitions (assignments and
-  // count operations) to stack-allocated variables to use for reaching
-  // definitions analysis.  AST node numbers in the AST are used to refer
-  // into this list.
-  ZoneList<Expression*> definitions_;
+  // The flow graph builder collects a list of explicit definitions
+  // (assignments and count operations) to stack-allocated variables to use
+  // for reaching definitions analysis.  It does not count the implicit
+  // definition at function entry.  AST node numbers in the AST are used to
+  // refer into this list.
+  int variable_count_;
+  ZoneList<Expression*> body_definitions_;

   DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
 };

@@ -589,15 +610,11 @@ class AssignedVariablesAnalyzer : public AstVisitor {
 class ReachingDefinitions BASE_EMBEDDED {
  public:
   ReachingDefinitions(ZoneList<Node*>* postorder,
-                      ZoneList<Expression*>* definitions,
+                      ZoneList<Expression*>* body_definitions,
                       int variable_count)
       : postorder_(postorder),
-        definitions_(definitions),
-        variables_(variable_count) {
-    int definition_count = definitions->length();
-    for (int i = 0; i < variable_count; i++) {
-      variables_.Add(new BitVector(definition_count));
-    }
+        body_definitions_(body_definitions),
+        variable_count_(variable_count) {
   }

   static int IndexFor(Variable* var, int variable_count);

@@ -609,15 +626,46 @@ class ReachingDefinitions BASE_EMBEDDED {
   ZoneList<Node*>* postorder_;

   // A list of all the definitions in the body.
-  ZoneList<Expression*>* definitions_;
+  ZoneList<Expression*>* body_definitions_;

-  // For each variable, the set of all its definitions.
-  List<BitVector*> variables_;
+  int variable_count_;

   DISALLOW_COPY_AND_ASSIGN(ReachingDefinitions);
 };
+
+
+class TypeAnalyzer BASE_EMBEDDED {
+ public:
+  TypeAnalyzer(ZoneList<Node*>* postorder,
+               ZoneList<Expression*>* body_definitions,
+               int variable_count,
+               int param_count)
+      : postorder_(postorder),
+        body_definitions_(body_definitions),
+        variable_count_(variable_count),
+        param_count_(param_count) {}
+
+  void Compute();
+
+ private:
+  // Get the primitity of definition number i. Definitions are numbered
+  // by the flow graph builder.
+  bool IsPrimitiveDef(int def_num);
+
+  ZoneList<Node*>* postorder_;
+  ZoneList<Expression*>* body_definitions_;
+  int variable_count_;
+  int param_count_;
+
+  DISALLOW_COPY_AND_ASSIGN(TypeAnalyzer);
+};
+
+
+void MarkLiveCode(ZoneList<Node*>* nodes,
+                  ZoneList<Expression*>* body_definitions,
+                  int variable_count);
+

 } }  // namespace v8::internal
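Most of this header hunk just adds `virtual` to overrides of Node's virtual methods. The code compiles either way: a derived function that exactly matches a base-class virtual is implicitly virtual, so restating the keyword is documentation (this commit predates C++11's `override`). The trap the explicit keyword helps a reader spot is a near-miss signature, which silently hides the base function instead of overriding it. A small self-contained illustration; the types here are illustrative, not V8's:

#include <cstdio>

struct Node {
  virtual void PrintText() { std::printf("Node\n"); }
  virtual ~Node() {}
};

struct ExitNode : Node {
  // Same signature: implicitly virtual even without the keyword, but
  // spelling out 'virtual' documents the intent at the declaration.
  virtual void PrintText() { std::printf("ExitNode\n"); }
};

struct BadNode : Node {
  // Near-miss signature (extra parameter): this HIDES Node::PrintText
  // rather than overriding it, and pre-C++11 no keyword catches it.
  void PrintText(int indent) { std::printf("BadNode %d\n", indent); }
};

int main() {
  ExitNode e;
  BadNode b;
  Node* n = &e;
  n->PrintText();  // dispatches to ExitNode::PrintText
  n = &b;
  n->PrintText();  // prints "Node": BadNode's version never overrode it
  return 0;
}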

1090  deps/v8/src/date.js
File diff suppressed because it is too large

2132  deps/v8/src/debug-debugger.js
File diff suppressed because it is too large

60  deps/v8/src/debug.cc
@@ -685,29 +685,26 @@ bool Debug::CompileDebuggerScript(int index) {
   // Compile the script.
   bool allow_natives_syntax = FLAG_allow_natives_syntax;
   FLAG_allow_natives_syntax = true;
-  Handle<JSFunction> boilerplate;
-  boilerplate = Compiler::Compile(source_code,
-                                  script_name,
-                                  0,
-                                  0,
-                                  NULL,
-                                  NULL,
-                                  Handle<String>::null(),
-                                  NATIVES_CODE);
+  Handle<SharedFunctionInfo> function_info;
+  function_info = Compiler::Compile(source_code,
+                                    script_name,
+                                    0, 0, NULL, NULL,
+                                    Handle<String>::null(),
+                                    NATIVES_CODE);
   FLAG_allow_natives_syntax = allow_natives_syntax;

   // Silently ignore stack overflows during compilation.
-  if (boilerplate.is_null()) {
+  if (function_info.is_null()) {
     ASSERT(Top::has_pending_exception());
     Top::clear_pending_exception();
     return false;
   }

-  // Execute the boilerplate function in the debugger context.
+  // Execute the shared function in the debugger context.
   Handle<Context> context = Top::global_context();
   bool caught_exception = false;
   Handle<JSFunction> function =
-      Factory::NewFunctionFromBoilerplate(boilerplate, context);
+      Factory::NewFunctionFromSharedFunctionInfo(function_info, context);
   Handle<Object> result =
       Execution::TryCall(function, Handle<Object>(context->global()),
                          0, NULL, &caught_exception);

@@ -1685,7 +1682,7 @@ void Debug::CreateScriptCache() {
   // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
   // rid of all the cached script wrappers and the second gets rid of the
-  // scripts which is no longer referenced.
+  // scripts which are no longer referenced.
   Heap::CollectAllGarbage(false);
   Heap::CollectAllGarbage(false);

@@ -1999,7 +1996,7 @@ void Debugger::OnAfterCompile(Handle<Script> script,
   // If debugging there might be script break points registered for this
   // script. Make sure that these break points are set.

-  // Get the function UpdateScriptBreakPoints (defined in debug-delay.js).
+  // Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
   Handle<Object> update_script_break_points =
       Handle<Object>(Debug::debug_context()->global()->GetProperty(
           *Factory::LookupAsciiSymbol("UpdateScriptBreakPoints")));

@@ -2042,31 +2039,6 @@ void Debugger::OnAfterCompile(Handle<Script> script,
 }

-void Debugger::OnNewFunction(Handle<JSFunction> function) {
-  return;
-  HandleScope scope;
-
-  // Bail out based on state or if there is no listener for this event
-  if (Debug::InDebugger()) return;
-  if (compiling_natives()) return;
-  if (!Debugger::EventActive(v8::NewFunction)) return;
-
-  // Enter the debugger.
-  EnterDebugger debugger;
-  if (debugger.FailedToEnter()) return;
-
-  // Create the event object.
-  bool caught_exception = false;
-  Handle<Object> event_data = MakeNewFunctionEvent(function, &caught_exception);
-  // Bail out and don't call debugger if exception.
-  if (caught_exception) {
-    return;
-  }
-
-  // Process debug event.
-  ProcessDebugEvent(v8::NewFunction, Handle<JSObject>::cast(event_data), true);
-}
-
-
 void Debugger::OnScriptCollected(int id) {
   HandleScope scope;

@@ -2476,7 +2448,7 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
   // Enter the debugger.
   EnterDebugger debugger;
-  if (debugger.FailedToEnter() || !debugger.HasJavaScriptFrames()) {
+  if (debugger.FailedToEnter()) {
     return Factory::undefined_value();
   }

@@ -2489,8 +2461,12 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
   static const int kArgc = 2;
   Object** argv[kArgc] = { exec_state.location(), data.location() };
-  Handle<Object> result = Execution::Call(fun, Factory::undefined_value(),
-                                          kArgc, argv, pending_exception);
+  Handle<Object> result = Execution::Call(
+      fun,
+      Handle<Object>(Debug::debug_context_->global_proxy()),
+      kArgc,
+      argv,
+      pending_exception);

   return result;
 }

67  deps/v8/src/factory.cc
@@ -282,31 +282,26 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
 }

-Handle<JSFunction> Factory::BaseNewFunctionFromBoilerplate(
-    Handle<JSFunction> boilerplate,
+Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
+    Handle<SharedFunctionInfo> function_info,
     Handle<Map> function_map,
     PretenureFlag pretenure) {
-  ASSERT(boilerplate->IsBoilerplate());
-  ASSERT(!boilerplate->has_initial_map());
-  ASSERT(!boilerplate->has_prototype());
-  ASSERT(boilerplate->properties() == Heap::empty_fixed_array());
-  ASSERT(boilerplate->elements() == Heap::empty_fixed_array());
   CALL_HEAP_FUNCTION(Heap::AllocateFunction(*function_map,
-                                            boilerplate->shared(),
+                                            *function_info,
                                             Heap::the_hole_value(),
                                             pretenure),
                      JSFunction);
 }

-Handle<JSFunction> Factory::NewFunctionFromBoilerplate(
-    Handle<JSFunction> boilerplate,
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+    Handle<SharedFunctionInfo> function_info,
     Handle<Context> context,
     PretenureFlag pretenure) {
-  Handle<JSFunction> result = BaseNewFunctionFromBoilerplate(
-      boilerplate, Top::function_map(), pretenure);
+  Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
+      function_info, Top::function_map(), pretenure);
   result->set_context(*context);
-  int number_of_literals = boilerplate->NumberOfLiterals();
+  int number_of_literals = function_info->num_literals();
   Handle<FixedArray> literals =
       Factory::NewFixedArray(number_of_literals, pretenure);
   if (number_of_literals > 0) {

@@ -490,36 +485,6 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
 }

-Handle<JSFunction> Factory::NewFunctionBoilerplate(Handle<String> name,
-                                                   int number_of_literals,
-                                                   Handle<Code> code) {
-  Handle<JSFunction> function = NewFunctionBoilerplate(name);
-  function->set_code(*code);
-  int literals_array_size = number_of_literals;
-  // If the function contains object, regexp or array literals,
-  // allocate extra space for a literals array prefix containing the
-  // object, regexp and array constructor functions.
-  if (number_of_literals > 0) {
-    literals_array_size += JSFunction::kLiteralsPrefixSize;
-  }
-  Handle<FixedArray> literals =
-      Factory::NewFixedArray(literals_array_size, TENURED);
-  function->set_literals(*literals);
-  ASSERT(!function->has_initial_map());
-  ASSERT(!function->has_prototype());
-  return function;
-}
-
-
-Handle<JSFunction> Factory::NewFunctionBoilerplate(Handle<String> name) {
-  Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
-  CALL_HEAP_FUNCTION(Heap::AllocateFunction(Heap::boilerplate_function_map(),
-                                            *shared,
-                                            Heap::the_hole_value()),
-                     JSFunction);
-}
-
-
 Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
                                                      InstanceType type,
                                                      int instance_size,

@@ -686,6 +651,22 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
 }

+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
+    Handle<String> name, int number_of_literals, Handle<Code> code) {
+  Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
+  shared->set_code(*code);
+  int literals_array_size = number_of_literals;
+  // If the function contains object, regexp or array literals,
+  // allocate extra space for a literals array prefix containing the
+  // context.
+  if (number_of_literals > 0) {
+    literals_array_size += JSFunction::kLiteralsPrefixSize;
+  }
+  shared->set_num_literals(literals_array_size);
+  return shared;
+}
+
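The sizing rule the new NewSharedFunctionInfo overload applies: a function with no literals stores 0, and any function with literals pays an extra kLiteralsPrefixSize slots for the prefix (which holds the context here). A standalone restatement of that arithmetic; the prefix size of 1 is an assumption for this sketch, the real constant lives in the V8 sources:

#include <cassert>

// Illustrative stand-in for JSFunction::kLiteralsPrefixSize (assumed to
// be 1 here purely for the example).
const int kLiteralsPrefixSize = 1;

// Mirrors the sizing logic in Factory::NewSharedFunctionInfo above:
// functions without literals allocate nothing extra, functions with
// literals reserve a prefix in front of the literal slots.
int LiteralsArraySize(int number_of_literals) {
  int size = number_of_literals;
  if (number_of_literals > 0) size += kLiteralsPrefixSize;
  return size;
}

int main() {
  assert(LiteralsArraySize(0) == 0);  // no literals, no prefix
  assert(LiteralsArraySize(3) == 3 + kLiteralsPrefixSize);
  return 0;
}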
 Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
   CALL_HEAP_FUNCTION(Heap::AllocateSharedFunctionInfo(*name),
                      SharedFunctionInfo);

22  deps/v8/src/factory.h
@@ -218,8 +218,13 @@ class Factory : public AllStatic {
   static Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);

-  static Handle<JSFunction> NewFunctionFromBoilerplate(
-      Handle<JSFunction> boilerplate,
+  static Handle<JSFunction> BaseNewFunctionFromSharedFunctionInfo(
+      Handle<SharedFunctionInfo> function_info,
+      Handle<Map> function_map,
+      PretenureFlag pretenure);
+
+  static Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+      Handle<SharedFunctionInfo> function_info,
       Handle<Context> context,
       PretenureFlag pretenure = TENURED);

@@ -273,12 +278,6 @@ class Factory : public AllStatic {
                                           Handle<Code> code,
                                           bool force_initial_map);

-  static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name,
-                                                   int number_of_literals,
-                                                   Handle<Code> code);
-
-  static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name);
-
   static Handle<JSFunction> NewFunction(Handle<Map> function_map,
       Handle<SharedFunctionInfo> shared, Handle<Object> prototype);

@@ -337,6 +336,8 @@ class Factory : public AllStatic {
     return Handle<String>(&Heap::hidden_symbol_);
   }

+  static Handle<SharedFunctionInfo> NewSharedFunctionInfo(
+      Handle<String> name, int number_of_literals, Handle<Code> code);
   static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);

   static Handle<NumberDictionary> DictionaryAtNumberPut(

@@ -377,11 +378,6 @@ class Factory : public AllStatic {
       Handle<DescriptorArray> array,
       Handle<Object> descriptors);

-  static Handle<JSFunction> BaseNewFunctionFromBoilerplate(
-      Handle<JSFunction> boilerplate,
-      Handle<Map> function_map,
-      PretenureFlag pretenure);
-
   // Create a new map cache.
   static Handle<MapCache> NewMapCache(int at_least_space_for);

10  deps/v8/src/fast-codegen.cc
@@ -195,9 +195,9 @@ void FastCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
 }

-void FastCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  BAILOUT("FunctionBoilerplateLiteral");
+void FastCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  BAILOUT("SharedFunctionInfoLiteral");
 }

@@ -560,8 +560,8 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
 }

-void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void FastCodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   UNREACHABLE();
 }

8  deps/v8/src/flag-definitions.h
@@ -122,9 +122,9 @@ DEFINE_bool(enable_armv7, true,
 // bootstrapper.cc
 DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
 DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
-DEFINE_string(natives_file, NULL, "alternative natives file")
 DEFINE_bool(expose_gc, false, "expose gc extension")
 DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
+DEFINE_bool(disable_native_files, false, "disable builtin natives files")

 // builtins-ia32.cc
 DEFINE_bool(inline_new, true, "use fast inline allocation")

@@ -160,6 +160,9 @@ DEFINE_bool(use_flow_graph, false, "perform flow-graph based optimizations")
 // compilation-cache.cc
 DEFINE_bool(compilation_cache, true, "enable compilation cache")

+// data-flow.cc
+DEFINE_bool(loop_peeling, false, "Peel off the first iteration of loops.")
+
 // debug.cc
 DEFINE_bool(remote_debugging, false, "enable remote debugging")
 DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")

@@ -202,6 +205,9 @@ DEFINE_bool(cleanup_ics_at_gc, true,
             "Flush inline caches prior to mark compact collection.")
 DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
             "Flush code caches in maps during mark compact cycle.")
+DEFINE_int(random_seed, 0,
+           "Default seed for initializing random generator "
+           "(0, the default, means to use system random).")
 DEFINE_bool(canonicalize_object_literal_maps, true,
             "Canonicalize maps for object literals.")

44  deps/v8/src/frame-element.h
@@ -28,7 +28,7 @@
 #ifndef V8_FRAME_ELEMENT_H_
 #define V8_FRAME_ELEMENT_H_

-#include "number-info.h"
+#include "type-info-inl.h"
 #include "macro-assembler.h"
 #include "zone.h"

@@ -54,25 +54,19 @@ class FrameElement BASE_EMBEDDED {
     SYNCED
   };

-  inline NumberInfo number_info() {
-    // Copied elements do not have number info. Instead
+  inline TypeInfo type_info() {
+    // Copied elements do not have type info. Instead
     // we have to inspect their backing element in the frame.
     ASSERT(!is_copy());
-    if (!is_constant()) {
-      return NumberInfo::FromInt(NumberInfoField::decode(value_));
-    }
-    Handle<Object> value = handle();
-    if (value->IsSmi()) return NumberInfo::Smi();
-    if (value->IsHeapNumber()) return NumberInfo::HeapNumber();
-    return NumberInfo::Unknown();
+    return TypeInfo::FromInt(TypeInfoField::decode(value_));
   }

-  inline void set_number_info(NumberInfo info) {
-    // Copied elements do not have number info. Instead
+  inline void set_type_info(TypeInfo info) {
+    // Copied elements do not have type info. Instead
     // we have to inspect their backing element in the frame.
     ASSERT(!is_copy());
-    value_ = value_ & ~NumberInfoField::mask();
-    value_ = value_ | NumberInfoField::encode(info.ToInt());
+    value_ = value_ & ~TypeInfoField::mask();
+    value_ = value_ | TypeInfoField::encode(info.ToInt());
   }

   // The default constructor creates an invalid frame element.

@@ -80,7 +74,7 @@ class FrameElement BASE_EMBEDDED {
     value_ = TypeField::encode(INVALID)
         | CopiedField::encode(false)
        | SyncedField::encode(false)
-        | NumberInfoField::encode(NumberInfo::Uninitialized().ToInt())
+        | TypeInfoField::encode(TypeInfo::Uninitialized().ToInt())
         | DataField::encode(0);
   }

@@ -91,7 +85,7 @@ class FrameElement BASE_EMBEDDED {
   }

   // Factory function to construct an in-memory frame element.
-  static FrameElement MemoryElement(NumberInfo info) {
+  static FrameElement MemoryElement(TypeInfo info) {
     FrameElement result(MEMORY, no_reg, SYNCED, info);
     return result;
   }

@@ -99,7 +93,7 @@ class FrameElement BASE_EMBEDDED {
   // Factory function to construct an in-register frame element.
   static FrameElement RegisterElement(Register reg,
                                       SyncFlag is_synced,
-                                      NumberInfo info) {
+                                      TypeInfo info) {
     return FrameElement(REGISTER, reg, is_synced, info);
   }

@@ -107,7 +101,8 @@ class FrameElement BASE_EMBEDDED {
   // compile time.
   static FrameElement ConstantElement(Handle<Object> value,
                                       SyncFlag is_synced) {
-    FrameElement result(value, is_synced);
+    TypeInfo info = TypeInfo::TypeFromValue(value);
+    FrameElement result(value, is_synced, info);
     return result;
   }

@@ -223,20 +218,20 @@ class FrameElement BASE_EMBEDDED {
   FrameElement(Type type,
                Register reg,
                SyncFlag is_synced,
-               NumberInfo info) {
+               TypeInfo info) {
     value_ = TypeField::encode(type)
         | CopiedField::encode(false)
         | SyncedField::encode(is_synced != NOT_SYNCED)
-        | NumberInfoField::encode(info.ToInt())
+        | TypeInfoField::encode(info.ToInt())
         | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
   }

   // Used to construct constant elements.
-  FrameElement(Handle<Object> value, SyncFlag is_synced) {
+  FrameElement(Handle<Object> value, SyncFlag is_synced, TypeInfo info) {
     value_ = TypeField::encode(CONSTANT)
         | CopiedField::encode(false)
         | SyncedField::encode(is_synced != NOT_SYNCED)
-        | NumberInfoField::encode(NumberInfo::Uninitialized().ToInt())
+        | TypeInfoField::encode(info.ToInt())
        | DataField::encode(ConstantList()->length());
     ConstantList()->Add(value);
   }

@@ -262,12 +257,13 @@ class FrameElement BASE_EMBEDDED {
   // Encode type, copied, synced and data in one 32 bit integer.
   uint32_t value_;

+  // Declare BitFields with template parameters <type, start, size>.
   class TypeField: public BitField<Type, 0, 3> {};
   class CopiedField: public BitField<bool, 3, 1> {};
   class SyncedField: public BitField<bool, 4, 1> {};
   class UntaggedInt32Field: public BitField<bool, 5, 1> {};
-  class NumberInfoField: public BitField<int, 6, 4> {};
-  class DataField: public BitField<uint32_t, 10, 32 - 10> {};
+  class TypeInfoField: public BitField<int, 6, 6> {};
+  class DataField: public BitField<uint32_t, 12, 32 - 12> {};

   friend class VirtualFrame;
 };
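The layout change is the point of the last two lines: TypeInfoField grows from 4 to 6 bits (TypeInfo encodes more states than NumberInfo did), so DataField's start has to move from bit 10 to bit 12. The BitField template referenced by the new comment packs a value at <start, size> inside the 32-bit word; a self-contained sketch of that encode/mask/decode contract (a rewrite for illustration, not V8's bitfield-utils source):

#include <cassert>
#include <cstdint>

// Minimal re-implementation of the BitField<type, start, size> contract
// used by FrameElement: encode shifts into place, mask selects the run
// of bits, decode shifts back out.
template <class T, int start, int size>
struct BitField {
  static uint32_t mask() { return ((1u << size) - 1) << start; }
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << start;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & mask()) >> start);
  }
};

// The post-patch FrameElement layout: 3+1+1+1 bits of flags, then a
// 6-bit type info field, then the data field in the remaining 20 bits.
typedef BitField<int, 6, 6> TypeInfoField;
typedef BitField<uint32_t, 12, 32 - 12> DataField;

int main() {
  uint32_t value = TypeInfoField::encode(42) | DataField::encode(7);
  assert(TypeInfoField::decode(value) == 42);  // fits: 42 < 2^6
  assert(DataField::decode(value) == 7);
  // Clearing one field, as set_type_info does, leaves the other intact.
  value &= ~TypeInfoField::mask();
  assert(DataField::decode(value) == 7);
  return 0;
}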

2  deps/v8/src/frames.cc
@@ -346,6 +346,7 @@ void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) {
 void StackFrame::Cook() {
   Code* code = this->code();
+  ASSERT(code->IsCode());
   for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
     it.handler()->Cook(code);
   }

@@ -356,6 +357,7 @@ void StackFrame::Cook() {
 void StackFrame::Uncook() {
   Code* code = this->code();
+  ASSERT(code->IsCode());
   for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
     it.handler()->Uncook(code);
   }

14  deps/v8/src/full-codegen.cc
@@ -212,9 +212,9 @@ void FullCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
 }

-void FullCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  BAILOUT("FunctionBoilerplateLiteral");
+void FullCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  BAILOUT("SharedFunctionInfoLiteral");
 }

@@ -524,8 +524,8 @@ void FullCodeGenerator::VisitDeclarations(
           array->set_undefined(j++);
         }
       } else {
-        Handle<JSFunction> function =
-            Compiler::BuildBoilerplate(decl->fun(), script(), this);
+        Handle<SharedFunctionInfo> function =
+            Compiler::BuildFunctionInfo(decl->fun(), script(), this);
         // Check for stack-overflow exception.
         if (HasStackOverflow()) return;
         array->set(j++, *function);

@@ -998,8 +998,8 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
 }

-void FullCodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   UNREACHABLE();
 }

16  deps/v8/src/globals.h
@@ -174,6 +174,15 @@ const int kBitsPerByteLog2 = 3;
 const int kBitsPerPointer = kPointerSize * kBitsPerByte;
 const int kBitsPerInt = kIntSize * kBitsPerByte;

+// IEEE 754 single precision floating point number bit layout.
+const uint32_t kBinary32SignMask = 0x80000000u;
+const uint32_t kBinary32ExponentMask = 0x7f800000u;
+const uint32_t kBinary32MantissaMask = 0x007fffffu;
+const int kBinary32ExponentBias = 127;
+const int kBinary32MaxExponent = 0xFE;
+const int kBinary32MinExponent = 0x01;
+const int kBinary32MantissaBits = 23;
+const int kBinary32ExponentShift = 23;
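These constants describe the standard IEEE 754 single-precision layout: 1 sign bit, 8 exponent bits biased by 127, 23 mantissa bits. As a quick check of how the masks and shift compose, here is a worked decode of 1.5f (bit pattern 0x3FC00000); this is an illustration, not code from the commit:

#include <cassert>
#include <cstdint>
#include <cstring>

const uint32_t kBinary32SignMask = 0x80000000u;
const uint32_t kBinary32ExponentMask = 0x7f800000u;
const uint32_t kBinary32MantissaMask = 0x007fffffu;
const int kBinary32ExponentBias = 127;
const int kBinary32ExponentShift = 23;

int main() {
  float f = 1.5f;
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));  // type-pun safely via memcpy
  assert(bits == 0x3FC00000u);

  bool negative = (bits & kBinary32SignMask) != 0;
  int exponent = static_cast<int>((bits & kBinary32ExponentMask) >>
                                  kBinary32ExponentShift) -
                 kBinary32ExponentBias;
  uint32_t mantissa = bits & kBinary32MantissaMask;

  // 1.5 = +1.1b * 2^0: sign 0, unbiased exponent 0, mantissa 0.1b.
  assert(!negative);
  assert(exponent == 0);
  assert(mantissa == 0x400000u);  // top mantissa bit set = 0.5
  return 0;
}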
 // Zap-value: The value used for zapping dead objects.
 // Should be a recognizable hex value tagged as a heap object pointer.

@@ -195,6 +204,10 @@ const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
 // gives 8K bytes per page.
 const int kPageSizeBits = 13;

+// On Intel architecture, cache line size is 64 bytes.
+// On ARM it may be less (32 bytes), but as far this constant is
+// used for aligning data, it doesn't hurt to align on a greater value.
+const int kProcessorCacheLineSize = 64;
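A constant like this is typically consumed by rounding addresses or sizes up to a cache-line boundary so that hot data does not straddle (or falsely share) a line. A generic round-up sketch under that assumption; this helper is not code from the commit:

#include <cassert>

const int kProcessorCacheLineSize = 64;

// Round size up to the next multiple of the cache line; the usual
// power-of-two trick, valid because 64 is a power of two.
int RoundUpToCacheLine(int size) {
  return (size + kProcessorCacheLineSize - 1) & ~(kProcessorCacheLineSize - 1);
}

int main() {
  assert(RoundUpToCacheLine(1) == 64);
  assert(RoundUpToCacheLine(64) == 64);
  assert(RoundUpToCacheLine(65) == 128);
  return 0;
}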
 // Constants relevant to double precision floating point numbers.

@@ -321,7 +334,6 @@ enum Executability { NOT_EXECUTABLE, EXECUTABLE };
 enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };

-
 // Flag indicating whether code is built into the VM (one of the natives files).
 enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };

@@ -404,7 +416,7 @@ enum CallFunctionFlags {
 // Type of properties.
 // Order of properties is significant.
 // Must fit in the BitField PropertyDetails::TypeField.
-// A copy of this is in mirror-delay.js.
+// A copy of this is in mirror-debugger.js.
 enum PropertyType {
   NORMAL = 0,  // only in slow mode
   FIELD = 1,  // only in fast mode

100  deps/v8/src/handles.cc
@@ -174,13 +174,6 @@ void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
 }

-void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func,
-                                          int estimate) {
-  SetExpectedNofProperties(
-      func, ExpectedNofPropertiesFromEstimate(estimate));
-}
-
-
 void NormalizeProperties(Handle<JSObject> object,
                          PropertyNormalizationMode mode,
                          int expected_additional_properties) {

@@ -243,6 +236,15 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object,
 }

+Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
+                                     Handle<String> key,
+                                     Handle<Object> value,
+                                     PropertyDetails details) {
+  CALL_HEAP_FUNCTION(object->SetNormalizedProperty(*key, *value, details),
+                     Object);
+}
+
+
 Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
                                    Handle<Object> key) {
   CALL_HEAP_FUNCTION(Runtime::ForceDeleteObjectProperty(object, key), Object);

@@ -784,88 +786,4 @@ OptimizedObjectForAddingMultipleProperties::
   }
 }

-
-void LoadLazy(Handle<JSObject> obj, bool* pending_exception) {
-  HandleScope scope;
-  Handle<FixedArray> info(FixedArray::cast(obj->map()->constructor()));
-  int index = Smi::cast(info->get(0))->value();
-  ASSERT(index >= 0);
-  Handle<Context> compile_context(Context::cast(info->get(1)));
-  Handle<Context> function_context(Context::cast(info->get(2)));
-  Handle<Object> receiver(compile_context->global()->builtins());
-
-  Vector<const char> name = Natives::GetScriptName(index);
-
-  Handle<JSFunction> boilerplate;
-
-  if (!Bootstrapper::NativesCacheLookup(name, &boilerplate)) {
-    Handle<String> source_code = Bootstrapper::NativesSourceLookup(index);
-    Handle<String> script_name = Factory::NewStringFromAscii(name);
-    bool allow_natives_syntax = FLAG_allow_natives_syntax;
-    FLAG_allow_natives_syntax = true;
-    boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL,
-                                    Handle<String>::null(), NATIVES_CODE);
-    FLAG_allow_natives_syntax = allow_natives_syntax;
-    // If the compilation failed (possibly due to stack overflows), we
-    // should never enter the result in the natives cache. Instead we
-    // return from the function without marking the function as having
-    // been lazily loaded.
-    if (boilerplate.is_null()) {
-      *pending_exception = true;
-      return;
-    }
-    Bootstrapper::NativesCacheAdd(name, boilerplate);
-  }
-
-  // We shouldn't get here if compiling the script failed.
-  ASSERT(!boilerplate.is_null());
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // When the debugger running in its own context touches lazy loaded
-  // functions loading can be triggered. In that case ensure that the
-  // execution of the boilerplate is in the correct context.
-  SaveContext save;
-  if (!Debug::debug_context().is_null() &&
-      Top::context() == *Debug::debug_context()) {
-    Top::set_context(*compile_context);
-  }
-#endif
-
-  // Reset the lazy load data before running the script to make sure
-  // not to get recursive lazy loading.
-  obj->map()->set_needs_loading(false);
-  obj->map()->set_constructor(info->get(3));
-
-  // Run the script.
-  Handle<JSFunction> script_fun(
-      Factory::NewFunctionFromBoilerplate(boilerplate, function_context));
-  Execution::Call(script_fun, receiver, 0, NULL, pending_exception);
-
-  // If lazy loading failed, restore the unloaded state of obj.
-  if (*pending_exception) {
-    obj->map()->set_needs_loading(true);
-    obj->map()->set_constructor(*info);
-  }
-}
-
-
-void SetupLazy(Handle<JSObject> obj,
-               int index,
-               Handle<Context> compile_context,
-               Handle<Context> function_context) {
-  Handle<FixedArray> arr = Factory::NewFixedArray(4);
-  arr->set(0, Smi::FromInt(index));
-  arr->set(1, *compile_context);  // Compile in this context
-  arr->set(2, *function_context);  // Set function context to this
-  arr->set(3, obj->map()->constructor());  // Remember the constructor
-  Handle<Map> old_map(obj->map());
-  Handle<Map> new_map = Factory::CopyMapDropTransitions(old_map);
-  obj->set_map(*new_map);
-  new_map->set_needs_loading(true);
-  // Store the lazy loading info in the constructor field.  We'll
-  // reestablish the constructor from the fixed array after loading.
-  new_map->set_constructor(*arr);
-  ASSERT(!obj->IsLoaded());
-}
-
 } }  // namespace v8::internal

14  deps/v8/src/handles.h
@@ -210,6 +210,11 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object,
                                 Handle<Object> value,
                                 PropertyAttributes attributes);

+Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
+                                     Handle<String> key,
+                                     Handle<Object> value,
+                                     PropertyDetails details);
+
 Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
                                    Handle<Object> key);

@@ -307,8 +312,6 @@ void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value);
 // Sets the expected number of properties based on estimate from compiler.
 void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
                                           int estimate);
-void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func,
-                                          int estimate);

 Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(

@@ -340,13 +343,6 @@ bool CompileLazyInLoop(Handle<JSFunction> function,
 // Returns the lazy compilation stub for argc arguments.
 Handle<Code> ComputeLazyCompile(int argc);

-// These deal with lazily loaded properties.
-void SetupLazy(Handle<JSObject> obj,
-               int index,
-               Handle<Context> compile_context,
-               Handle<Context> function_context);
-void LoadLazy(Handle<JSObject> obj, bool* pending_exception);
-
 class NoHandleAllocation BASE_EMBEDDED {
  public:
 #ifndef DEBUG

4  deps/v8/src/heap-inl.h
@@ -283,11 +283,11 @@ Object* Heap::PrepareForCompare(String* str) {
   const int length = str->length();
   Object* obj = str->TryFlatten();
   if (length <= kMaxAlwaysFlattenLength ||
-      unflattended_strings_length_ >= kFlattenLongThreshold) {
+      unflattened_strings_length_ >= kFlattenLongThreshold) {
     return obj;
   }
   if (obj->IsFailure()) {
-    unflattended_strings_length_ += length;
+    unflattened_strings_length_ += length;
   }
   return str;
 }

73  deps/v8/src/heap.cc
@@ -98,6 +98,9 @@ size_t Heap::code_range_size_ = 0;
 // set up by ConfigureHeap otherwise.
 int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;

+List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_;
+List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;
+
 GCCallback Heap::global_gc_prologue_callback_ = NULL;
 GCCallback Heap::global_gc_epilogue_callback_ = NULL;

@@ -114,7 +117,7 @@ Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
 int Heap::mc_count_ = 0;
 int Heap::gc_count_ = 0;

-int Heap::unflattended_strings_length_ = 0;
+int Heap::unflattened_strings_length_ = 0;

 int Heap::always_allocate_scope_depth_ = 0;
 int Heap::linear_allocation_scope_depth_ = 0;

@@ -304,7 +307,7 @@ void Heap::ReportStatisticsAfterGC() {
 void Heap::GarbageCollectionPrologue() {
   TranscendentalCache::Clear();
   gc_count_++;
-  unflattended_strings_length_ = 0;
+  unflattened_strings_length_ = 0;
 #ifdef DEBUG
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
   allow_allocation(false);

@@ -547,6 +550,16 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
     GCTracer::ExternalScope scope(tracer);
     global_gc_prologue_callback_();
   }
+
+  GCType gc_type =
+      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
+
+  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
+    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
+      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
+    }
+  }
+
   EnsureFromSpaceIsCommitted();

   // Perform mark-sweep with optional compaction.

@@ -585,6 +598,15 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
         amount_of_external_allocated_memory_;
   }

+  GCCallbackFlags callback_flags = tracer->is_compacting()
+      ? kGCCallbackFlagCompacted
+      : kNoGCCallbackFlags;
+  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
+    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
+      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
+    }
+  }
+
   if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
     ASSERT(!allocation_allowed_);
     GCTracer::ExternalScope scope(tracer);

@@ -1269,7 +1291,7 @@ bool Heap::CreateInitialMaps() {
   if (obj->IsFailure()) return false;
   set_oddball_map(Map::cast(obj));

-  // Allocate the empty array
+  // Allocate the empty array.
   obj = AllocateEmptyFixedArray();
   if (obj->IsFailure()) return false;
   set_empty_fixed_array(FixedArray::cast(obj));

@@ -1415,7 +1437,8 @@ bool Heap::CreateInitialMaps() {
   if (obj->IsFailure()) return false;
   set_boilerplate_function_map(Map::cast(obj));

-  obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize);
+  obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
+                    SharedFunctionInfo::kAlignedSize);
   if (obj->IsFailure()) return false;
   set_shared_function_info_map(Map::cast(obj));

@@ -2625,7 +2648,7 @@ Object* Heap::CopyJSObject(JSObject* source) {
     // Update write barrier for all fields that lie beyond the header.
     RecordWrites(clone_address,
                  JSObject::kHeaderSize,
-                 object_size - JSObject::kHeaderSize);
+                 (object_size - JSObject::kHeaderSize) / kPointerSize);
   } else {
     clone = new_space_.AllocateRaw(object_size);
     if (clone->IsFailure()) return clone;

@@ -3786,6 +3809,46 @@ void Heap::Unprotect() {
 #endif

+
+void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
+  ASSERT(callback != NULL);
+  GCPrologueCallbackPair pair(callback, gc_type);
+  ASSERT(!gc_prologue_callbacks_.Contains(pair));
+  return gc_prologue_callbacks_.Add(pair);
+}
+
+
+void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+  ASSERT(callback != NULL);
+  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
+    if (gc_prologue_callbacks_[i].callback == callback) {
+      gc_prologue_callbacks_.Remove(i);
+      return;
+    }
+  }
+  UNREACHABLE();
+}
+
+
+void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
+  ASSERT(callback != NULL);
+  GCEpilogueCallbackPair pair(callback, gc_type);
+  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
+  return gc_epilogue_callbacks_.Add(pair);
+}
+
+
+void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+  ASSERT(callback != NULL);
+  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
+    if (gc_epilogue_callbacks_[i].callback == callback) {
+      gc_epilogue_callbacks_.Remove(i);
+      return;
+    }
+  }
+  UNREACHABLE();
+}
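The shape of the new callback registry, from the code above: a callback is paired with a GCType bitmask at registration, and PerformGarbageCollection invokes it only when the current collection's type is in the mask, passing flags such as kGCCallbackFlagCompacted. A toy registry showing that filter-and-dispatch pattern in isolation; the enum values and names are mirrored from the diff, everything else is illustrative:

#include <cstdio>
#include <vector>

// GCType/flag values mirrored from the V8 API used in this diff.
enum GCType { kGCTypeScavenge = 1, kGCTypeMarkSweepCompact = 2 };
enum GCCallbackFlags { kNoGCCallbackFlags = 0, kGCCallbackFlagCompacted = 1 };
typedef void (*GCPrologueCallback)(GCType type, GCCallbackFlags flags);

struct CallbackPair {
  GCPrologueCallback callback;
  GCType gc_type;  // bitmask of collection types this callback wants
};
static std::vector<CallbackPair> prologue_callbacks;

void AddGCPrologueCallback(GCPrologueCallback cb, GCType filter) {
  CallbackPair pair = {cb, filter};
  prologue_callbacks.push_back(pair);
}

// The dispatch loop from PerformGarbageCollection, in miniature:
// only callbacks whose mask includes this GC's type fire.
void RunPrologueCallbacks(GCType gc_type) {
  for (size_t i = 0; i < prologue_callbacks.size(); ++i) {
    if (gc_type & prologue_callbacks[i].gc_type) {
      prologue_callbacks[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }
}

static void OnFullGC(GCType, GCCallbackFlags) {
  std::printf("full GC prologue\n");
}

int main() {
  AddGCPrologueCallback(OnFullGC, kGCTypeMarkSweepCompact);
  RunPrologueCallbacks(kGCTypeScavenge);          // filtered out, no output
  RunPrologueCallbacks(kGCTypeMarkSweepCompact);  // fires
  return 0;
}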
 #ifdef DEBUG

 class PrintHandleVisitor: public ObjectVisitor {

42  deps/v8/src/heap.h
@ -108,6 +108,7 @@ class ZoneScopeInfo;
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, natives_source_cache, NativesSourceCache) \ V(FixedArray, natives_source_cache, NativesSourceCache) \
V(Object, last_script_id, LastScriptId) \ V(Object, last_script_id, LastScriptId) \
V(Script, empty_script, EmptyScript) \
V(Smi, real_stack_limit, RealStackLimit) \ V(Smi, real_stack_limit, RealStackLimit) \
#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
@ -673,10 +674,20 @@ class Heap : public AllStatic {
   static bool GarbageCollectionGreedyCheck();
 #endif

+  static void AddGCPrologueCallback(
+      GCEpilogueCallback callback, GCType gc_type_filter);
+  static void RemoveGCPrologueCallback(GCEpilogueCallback callback);
+
+  static void AddGCEpilogueCallback(
+      GCEpilogueCallback callback, GCType gc_type_filter);
+  static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
+
   static void SetGlobalGCPrologueCallback(GCCallback callback) {
+    ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL));
     global_gc_prologue_callback_ = callback;
   }
   static void SetGlobalGCEpilogueCallback(GCCallback callback) {
+    ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL));
     global_gc_epilogue_callback_ = callback;
   }

@@ -758,6 +769,10 @@ class Heap : public AllStatic {
     roots_[kNonMonomorphicCacheRootIndex] = value;
   }

+  static void public_set_empty_script(Script* script) {
+    roots_[kEmptyScriptRootIndex] = script;
+  }
+
   // Update the next script id.
   static inline void SetLastScriptId(Object* last_script_id);

@@ -965,7 +980,7 @@ class Heap : public AllStatic {
   static int gc_count_;  // how many gc happened

   // Total length of the strings we failed to flatten since the last GC.
-  static int unflattended_strings_length_;
+  static int unflattened_strings_length_;

 #define ROOT_ACCESSOR(type, name, camel_name) \
   static inline void set_##name(type* value) { \

@@ -1041,6 +1056,30 @@ class Heap : public AllStatic {
   // GC callback function, called before and after mark-compact GC.
   // Allocations in the callback function are disallowed.
+  struct GCPrologueCallbackPair {
+    GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type)
+        : callback(callback), gc_type(gc_type) {
+    }
+    bool operator==(const GCPrologueCallbackPair& pair) const {
+      return pair.callback == callback;
+    }
+    GCPrologueCallback callback;
+    GCType gc_type;
+  };
+  static List<GCPrologueCallbackPair> gc_prologue_callbacks_;
+
+  struct GCEpilogueCallbackPair {
+    GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type)
+        : callback(callback), gc_type(gc_type) {
+    }
+    bool operator==(const GCEpilogueCallbackPair& pair) const {
+      return pair.callback == callback;
+    }
+    GCEpilogueCallback callback;
+    GCType gc_type;
+  };
+  static List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
+
   static GCCallback global_gc_prologue_callback_;
   static GCCallback global_gc_epilogue_callback_;

@@ -1583,6 +1622,7 @@ class GCTracer BASE_EMBEDDED {
   // Sets the flag that this is a compacting full GC.
   void set_is_compacting() { is_compacting_ = true; }
+  bool is_compacting() const { return is_compacting_; }

   // Increment and decrement the count of marked objects.
   void increment_marked_count() { ++marked_count_; }
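The new pair structs in the heap.h hunk above compare equal on the callback pointer alone, which is what lets RemoveGCPrologueCallback/RemoveGCEpilogueCallback find an entry without the caller repeating the original GCType filter. A minimal standalone sketch of that pattern, using std::vector in place of V8's internal List; the type definitions and dispatch function here are illustrative stand-ins, not V8's:

```cpp
#include <vector>

// Hypothetical stand-ins for V8's GCType and callback typedefs.
enum GCType { kGCTypeScavenge = 1, kGCTypeMarkSweepCompact = 2, kGCTypeAll = 3 };
typedef void (*GCPrologueCallback)(GCType type);

struct GCPrologueCallbackPair {
  GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type)
      : callback(callback), gc_type(gc_type) {}
  // Equality ignores the filter, so removal only needs the function pointer.
  bool operator==(const GCPrologueCallbackPair& pair) const {
    return pair.callback == callback;
  }
  GCPrologueCallback callback;
  GCType gc_type;
};

static std::vector<GCPrologueCallbackPair> gc_prologue_callbacks_;

void AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type_filter) {
  gc_prologue_callbacks_.push_back(
      GCPrologueCallbackPair(callback, gc_type_filter));
}

void RemoveGCPrologueCallback(GCPrologueCallback callback) {
  GCPrologueCallbackPair pair(callback, kGCTypeAll);  // filter is ignored
  for (size_t i = 0; i < gc_prologue_callbacks_.size(); ++i) {
    if (gc_prologue_callbacks_[i] == pair) {
      gc_prologue_callbacks_.erase(gc_prologue_callbacks_.begin() + i);
      return;
    }
  }
}

// At GC time, only callbacks whose filter matches the collection type fire.
void CallGCPrologueCallbacks(GCType gc_type) {
  for (size_t i = 0; i < gc_prologue_callbacks_.size(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type);
    }
  }
}
```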

859
deps/v8/src/ia32/codegen-ia32.cc

File diff suppressed because it is too large

19
deps/v8/src/ia32/codegen-ia32.h

@@ -373,6 +373,7 @@ class CodeGenerator: public AstVisitor {
   // Take the Result that is an untagged int32, and convert it to a tagged
   // Smi or HeapNumber.  Remove the untagged_int32 flag from the result.
   void ConvertInt32ResultToNumber(Result* value);
+  void ConvertInt32ResultToSmi(Result* value);

   // Track loop nesting level.
   int loop_nesting() const { return loop_nesting_; }

@@ -528,6 +529,10 @@ class CodeGenerator: public AstVisitor {
                  Condition cc,
                  bool strict,
                  ControlDestination* destination);
+  void GenerateInlineNumberComparison(Result* left_side,
+                                      Result* right_side,
+                                      Condition cc,
+                                      ControlDestination* dest);

   // To prevent long attacker-controlled byte sequences, integer constants
   // from the JavaScript source are loaded in two parts if they are larger

@@ -574,8 +579,8 @@ class CodeGenerator: public AstVisitor {
   // name/value pairs.
   void DeclareGlobals(Handle<FixedArray> pairs);

-  // Instantiate the function boilerplate.
-  Result InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+  // Instantiate the function based on the shared function info.
+  Result InstantiateFunction(Handle<SharedFunctionInfo> function_info);

   // Support for type checks.
   void GenerateIsSmi(ZoneList<Expression*>* args);

@@ -652,7 +657,7 @@ class CodeGenerator: public AstVisitor {
   void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
   void CodeForSourcePosition(int pos);

-  void SetTypeForStackSlot(Slot* slot, NumberInfo info);
+  void SetTypeForStackSlot(Slot* slot, TypeInfo info);

 #ifdef DEBUG
   // True if the registers are valid for entry to a block.  There should

@@ -736,7 +741,7 @@ class GenericBinaryOpStub: public CodeStub {
   GenericBinaryOpStub(Token::Value op,
                       OverwriteMode mode,
                       GenericBinaryFlags flags,
-                      NumberInfo operands_type)
+                      TypeInfo operands_type)
       : op_(op),
         mode_(mode),
         flags_(flags),

@@ -759,7 +764,7 @@ class GenericBinaryOpStub: public CodeStub {
         args_in_registers_(ArgsInRegistersBits::decode(key)),
         args_reversed_(ArgsReversedBits::decode(key)),
         use_sse3_(SSE3Bits::decode(key)),
-        static_operands_type_(NumberInfo::ExpandedRepresentation(
+        static_operands_type_(TypeInfo::ExpandedRepresentation(
             StaticTypeInfoBits::decode(key))),
         runtime_operands_type_(runtime_operands_type),
         name_(NULL) {

@@ -786,7 +791,7 @@ class GenericBinaryOpStub: public CodeStub {
   bool use_sse3_;

   // Number type information of operands, determined by code generator.
-  NumberInfo static_operands_type_;
+  TypeInfo static_operands_type_;

   // Operand type information determined at runtime.
   BinaryOpIC::TypeInfo runtime_operands_type_;

@@ -798,7 +803,7 @@ class GenericBinaryOpStub: public CodeStub {
 #ifdef DEBUG
   void Print() {
     PrintF("GenericBinaryOpStub %d (op %s), "
-           "(mode %d, flags %d, registers %d, reversed %d, number_info %s)\n",
+           "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
            MinorKey(),
            Token::String(op_),
            static_cast<int>(mode_),
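The key-decoding constructor in the hunk above rebuilds each stub field from a single packed integer through the *Bits::decode helpers. A self-contained sketch of that bit-field encode/decode pattern; the field layout, widths, and type names below are invented for illustration, not V8's actual ones:

```cpp
#include <cassert>
#include <cstdint>

// Minimal sketch of the BitField pattern used to pack stub parameters
// into one "minor key" integer (names and layout are illustrative).
template <class T, int shift, int size>
class BitField {
 public:
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    // The value must fit in the allotted bits.
    assert((static_cast<uint32_t>(value) & ~((1u << size) - 1)) == 0);
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};

// Example layout: two flags and a small enum packed into one key.
typedef BitField<bool, 0, 1> ArgsInRegistersBits;
typedef BitField<bool, 1, 1> ArgsReversedBits;
typedef BitField<int, 2, 3>  StaticTypeInfoBits;

int main() {
  uint32_t key = ArgsInRegistersBits::encode(true) |
                 ArgsReversedBits::encode(false) |
                 StaticTypeInfoBits::encode(5);
  // Decoding recovers every field independently of the others.
  assert(ArgsInRegistersBits::decode(key) == true);
  assert(ArgsReversedBits::decode(key) == false);
  assert(StaticTypeInfoBits::decode(key) == 5);
  return 0;
}
```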

10
deps/v8/src/ia32/fast-codegen-ia32.cc

@@ -195,9 +195,9 @@ void FastCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
 }

-void FastCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  BAILOUT("FunctionBoilerplateLiteral");
+void FastCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  BAILOUT("SharedFunctionInfoLiteral");
 }

@@ -764,8 +764,8 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
 }

-void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
+void FastCodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
   UNREACHABLE();
 }

12
deps/v8/src/ia32/full-codegen-ia32.cc

@@ -777,15 +777,13 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   Comment cmnt(masm_, "[ FunctionLiteral");

   // Build the function boilerplate and instantiate it.
-  Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script(), this);
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(expr, script(), this);
   if (HasStackOverflow()) return;
-  ASSERT(boilerplate->IsBoilerplate());
-
   // Create a new closure.
   __ push(esi);
-  __ push(Immediate(boilerplate));
+  __ push(Immediate(function_info));
   __ CallRuntime(Runtime::kNewClosure, 2);
   Apply(context_, eax);
 }

@@ -1132,7 +1130,7 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
   GenericBinaryOpStub stub(op,
                            NO_OVERWRITE,
                            NO_GENERIC_BINARY_FLAGS,
-                           NumberInfo::Unknown());
+                           TypeInfo::Unknown());
   __ CallStub(&stub);
   Apply(context, eax);
 }

@@ -1747,7 +1745,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   GenericBinaryOpStub stub(expr->binary_op(),
                            NO_OVERWRITE,
                            NO_GENERIC_BINARY_FLAGS,
-                           NumberInfo::Unknown());
+                           TypeInfo::Unknown());
   stub.GenerateCall(masm(), eax, Smi::FromInt(1));
   __ bind(&done);

24
deps/v8/src/ia32/ic-ia32.cc

@@ -254,23 +254,6 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
 }

-// Helper function used to check that a value is either not an object
-// or is loaded if it is an object.
-static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
-                                           Register value, Register scratch) {
-  Label done;
-  // Check if the value is a Smi.
-  __ test(value, Immediate(kSmiTagMask));
-  __ j(zero, &done, not_taken);
-  // Check if the object has been loaded.
-  __ mov(scratch, FieldOperand(value, JSFunction::kMapOffset));
-  __ mov(scratch, FieldOperand(scratch, Map::kBitField2Offset));
-  __ test(scratch, Immediate(1 << Map::kNeedsLoading));
-  __ j(not_zero, miss, not_taken);
-  __ bind(&done);
-}
-
-
 // The offset from the inlined patch site to the start of the
 // inlined load instruction.  It is 7 bytes (test eax, imm) plus
 // 6 bytes (jne slow_label).

@@ -495,7 +478,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
                              ecx,
                              edi,
                              DICTIONARY_CHECK_DONE);
-  GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, ebx);
   __ mov(eax, ecx);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);

@@ -1146,11 +1128,6 @@ static void GenerateNormalHelper(MacroAssembler* masm,
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
   __ j(not_equal, miss, not_taken);

-  // Check that the function has been loaded.  eax holds function's map.
-  __ mov(eax, FieldOperand(eax, Map::kBitField2Offset));
-  __ test(eax, Immediate(1 << Map::kNeedsLoading));
-  __ j(not_zero, miss, not_taken);
-
   // Patch the receiver on stack with the global proxy if necessary.
   if (is_global_object) {
     __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));

@@ -1341,7 +1318,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
                          edi,
                          ebx,
                          CHECK_DICTIONARY);
-  GenerateCheckNonObjectOrLoaded(masm, &miss, edi, edx);
   __ mov(eax, edi);
   __ ret(0);

4
deps/v8/src/ia32/register-allocator-ia32.cc

@@ -75,7 +75,7 @@ void Result::ToRegister() {
              Immediate(handle()));
     }
     // This result becomes a copy of the fresh one.
-    fresh.set_number_info(number_info());
+    fresh.set_type_info(type_info());
    *this = fresh;
   }
   ASSERT(is_register());

@@ -122,7 +122,7 @@ void Result::ToRegister(Register target) {
         }
       }
     }
-    fresh.set_number_info(number_info());
+    fresh.set_type_info(type_info());
     fresh.set_untagged_int32(is_untagged_int32());
     *this = fresh;
   } else if (is_register() && reg().is(target)) {

47
deps/v8/src/ia32/virtual-frame-ia32.cc

@@ -162,7 +162,7 @@ void VirtualFrame::MakeMergable() {
     if (element.is_constant() || element.is_copy()) {
       if (element.is_synced()) {
         // Just spill.
-        elements_[i] = FrameElement::MemoryElement(NumberInfo::Unknown());
+        elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown());
       } else {
         // Allocate to a register.
         FrameElement backing_element;  // Invalid if not a copy.

@@ -174,7 +174,7 @@ void VirtualFrame::MakeMergable() {
         elements_[i] =
             FrameElement::RegisterElement(fresh.reg(),
                                           FrameElement::NOT_SYNCED,
-                                          NumberInfo::Unknown());
+                                          TypeInfo::Unknown());
         Use(fresh.reg(), i);

         // Emit a move.

@@ -207,7 +207,7 @@ void VirtualFrame::MakeMergable() {
       // The copy flag is not relied on before the end of this loop,
       // including when registers are spilled.
       elements_[i].clear_copied();
-      elements_[i].set_number_info(NumberInfo::Unknown());
+      elements_[i].set_type_info(TypeInfo::Unknown());
     }
   }
 }

@@ -597,12 +597,12 @@ int VirtualFrame::InvalidateFrameSlotAt(int index) {
     elements_[new_backing_index] =
         FrameElement::RegisterElement(backing_reg,
                                       FrameElement::SYNCED,
-                                      original.number_info());
+                                      original.type_info());
   } else {
     elements_[new_backing_index] =
         FrameElement::RegisterElement(backing_reg,
                                       FrameElement::NOT_SYNCED,
-                                      original.number_info());
+                                      original.type_info());
   }
   // Update the other copies.
   for (int i = new_backing_index + 1; i < element_count(); i++) {

@@ -634,7 +634,7 @@ void VirtualFrame::TakeFrameSlotAt(int index) {
       FrameElement new_element =
           FrameElement::RegisterElement(fresh.reg(),
                                         FrameElement::NOT_SYNCED,
-                                        original.number_info());
+                                        original.type_info());
       Use(fresh.reg(), element_count());
       elements_.Add(new_element);
       __ mov(fresh.reg(), Operand(ebp, fp_relative(index)));

@@ -796,7 +796,7 @@ void VirtualFrame::UntaggedPushFrameSlotAt(int index) {
     FrameElement new_element =
         FrameElement::RegisterElement(fresh_reg,
                                       FrameElement::NOT_SYNCED,
-                                      original.number_info());
+                                      original.type_info());
     new_element.set_untagged_int32(true);
     Use(fresh_reg, element_count());
     fresh.Unuse();  // BreakTarget does not handle a live Result well.

@@ -808,7 +808,7 @@ void VirtualFrame::UntaggedPushFrameSlotAt(int index) {
       __ mov(fresh_reg, Operand(ebp, fp_relative(index)));
     }
     // Now convert the value to int32, or bail out.
-    if (original.number_info().IsSmi()) {
+    if (original.type_info().IsSmi()) {
       __ SmiUntag(fresh_reg);
       // Pushing the element is completely done.
     } else {

@@ -819,7 +819,7 @@ void VirtualFrame::UntaggedPushFrameSlotAt(int index) {
       __ jmp(&done);

       __ bind(&not_smi);
-      if (!original.number_info().IsNumber()) {
+      if (!original.type_info().IsNumber()) {
         __ cmp(FieldOperand(fresh_reg, HeapObject::kMapOffset),
                Factory::heap_number_map());
         cgen()->unsafe_bailout_->Branch(not_equal);

@@ -1040,18 +1040,23 @@ Result VirtualFrame::CallKeyedStoreIC() {
   PrepareForCall(0, 0);
   if (!cgen()->allocator()->is_used(eax) ||
       (value.is_register() && value.reg().is(eax))) {
-    value.ToRegister(eax);  // No effect if value is in eax already.
+    if (!cgen()->allocator()->is_used(eax)) {
+      value.ToRegister(eax);
+    }
     MoveResultsToRegisters(&key, &receiver, ecx, edx);
     value.Unuse();
   } else if (!cgen()->allocator()->is_used(ecx) ||
              (key.is_register() && key.reg().is(ecx))) {
-    // Receiver and/or key are in eax.
-    key.ToRegister(ecx);
+    if (!cgen()->allocator()->is_used(ecx)) {
+      key.ToRegister(ecx);
+    }
     MoveResultsToRegisters(&value, &receiver, eax, edx);
     key.Unuse();
   } else if (!cgen()->allocator()->is_used(edx) ||
              (receiver.is_register() && receiver.reg().is(edx))) {
-    receiver.ToRegister(edx);
+    if (!cgen()->allocator()->is_used(edx)) {
+      receiver.ToRegister(edx);
+    }
     MoveResultsToRegisters(&key, &value, ecx, eax);
     receiver.Unuse();
   } else {

@@ -1146,11 +1151,11 @@ Result VirtualFrame::Pop() {
   ASSERT(element.is_untagged_int32() == cgen()->in_safe_int32_mode());

   // Get number type information of the result.
-  NumberInfo info;
+  TypeInfo info;
   if (!element.is_copy()) {
-    info = element.number_info();
+    info = element.type_info();
   } else {
-    info = elements_[element.index()].number_info();
+    info = elements_[element.index()].type_info();
   }
   bool pop_needed = (stack_pointer_ == index);

@@ -1160,7 +1165,7 @@ Result VirtualFrame::Pop() {
     Result temp = cgen()->allocator()->Allocate();
     ASSERT(temp.is_valid());
     __ pop(temp.reg());
-    temp.set_number_info(info);
+    temp.set_type_info(info);
     temp.set_untagged_int32(element.is_untagged_int32());
     return temp;
   }

@@ -1193,7 +1198,7 @@ Result VirtualFrame::Pop() {
     FrameElement new_element =
         FrameElement::RegisterElement(temp.reg(),
                                       FrameElement::SYNCED,
-                                      element.number_info());
+                                      element.type_info());
     // Preserve the copy flag on the element.
     if (element.is_copied()) new_element.set_copied();
     elements_[index] = new_element;

@@ -1228,7 +1233,7 @@ void VirtualFrame::EmitPop(Operand operand) {
 }

-void VirtualFrame::EmitPush(Register reg, NumberInfo info) {
+void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
   ASSERT(stack_pointer_ == element_count() - 1);
   elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;

@@ -1236,7 +1241,7 @@ void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
 }

-void VirtualFrame::EmitPush(Operand operand, NumberInfo info) {
+void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
   ASSERT(stack_pointer_ == element_count() - 1);
   elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;

@@ -1244,7 +1249,7 @@ void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
 }

-void VirtualFrame::EmitPush(Immediate immediate, NumberInfo info) {
+void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
   ASSERT(stack_pointer_ == element_count() - 1);
   elements_.Add(FrameElement::MemoryElement(info));
   stack_pointer_++;

22
deps/v8/src/ia32/virtual-frame-ia32.h

@@ -28,7 +28,7 @@
 #ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
 #define V8_IA32_VIRTUAL_FRAME_IA32_H_

-#include "number-info.h"
+#include "type-info.h"
 #include "register-allocator.h"
 #include "scopes.h"

@@ -84,7 +84,7 @@ class VirtualFrame: public ZoneObject {
   // Create a duplicate of an existing valid frame element.
   FrameElement CopyElementAt(int index,
-                             NumberInfo info = NumberInfo::Uninitialized());
+                             TypeInfo info = TypeInfo::Uninitialized());

   // The number of elements on the virtual frame.
   int element_count() { return elements_.length(); }

@@ -138,7 +138,7 @@ class VirtualFrame: public ZoneObject {
   void ForgetElements(int count);

   // Spill all values from the frame to memory.
-  void SpillAll();
+  inline void SpillAll();

   // Spill all occurrences of a specific register from the frame.
   void Spill(Register reg) {

@@ -199,7 +199,7 @@ class VirtualFrame: public ZoneObject {
   // Prepare for returning from the frame by spilling locals.  This
   // avoids generating unnecessary merge code when jumping to the
   // shared return site.  Emits code for spills.
-  void PrepareForReturn();
+  inline void PrepareForReturn();

   // Number of local variables after when we use a loop for allocating.
   static const int kLocalVarBound = 10;

@@ -398,14 +398,14 @@ class VirtualFrame: public ZoneObject {
   // Push an element on top of the expression stack and emit a
   // corresponding push instruction.
   void EmitPush(Register reg,
-                NumberInfo info = NumberInfo::Unknown());
+                TypeInfo info = TypeInfo::Unknown());
   void EmitPush(Operand operand,
-                NumberInfo info = NumberInfo::Unknown());
+                TypeInfo info = TypeInfo::Unknown());
   void EmitPush(Immediate immediate,
-                NumberInfo info = NumberInfo::Unknown());
+                TypeInfo info = TypeInfo::Unknown());

   // Push an element on the virtual frame.
-  inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
+  inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
   inline void Push(Handle<Object> value);
   inline void Push(Smi* value);

@@ -417,7 +417,7 @@ class VirtualFrame: public ZoneObject {
     // This assert will trigger if you try to push the same value twice.
     ASSERT(result->is_valid());
     if (result->is_register()) {
-      Push(result->reg(), result->number_info());
+      Push(result->reg(), result->type_info());
     } else {
       ASSERT(result->is_constant());
       Push(result->handle());

@@ -447,8 +447,8 @@ class VirtualFrame: public ZoneObject {
   }

   // Update the type information of a variable frame element directly.
-  inline void SetTypeForLocalAt(int index, NumberInfo info);
-  inline void SetTypeForParamAt(int index, NumberInfo info);
+  inline void SetTypeForLocalAt(int index, TypeInfo info);
+  inline void SetTypeForParamAt(int index, TypeInfo info);

 private:
   static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
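The NumberInfo-to-TypeInfo change running through these files is a rename, but the information itself behaves like a small lattice: merge points combine the infos of incoming values and can only lose precision (see the TypeInfo::Combine calls in jump-target-heavy.cc further down). A toy sketch of that idea, with made-up bit values and a union-based join rather than V8's real encoding:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative TypeInfo-style lattice encoded as bit sets: combining two
// types yields the smallest set containing both, so merges only widen.
class TypeInfo {
 public:
  static TypeInfo Unknown()       { return TypeInfo(0x7); }  // top
  static TypeInfo Number()        { return TypeInfo(0x3); }
  static TypeInfo Smi()           { return TypeInfo(0x1); }
  static TypeInfo Uninitialized() { return TypeInfo(0x0); }  // bottom

  static TypeInfo Combine(TypeInfo a, TypeInfo b) {
    return TypeInfo(a.bits_ | b.bits_);  // set union == lattice join
  }

  bool IsSmi() const     { return bits_ == 0x1; }
  bool IsNumber() const  { return bits_ != 0 && (bits_ | 0x3) == 0x3; }
  bool IsUnknown() const { return bits_ == 0x7; }

 private:
  explicit TypeInfo(uint32_t bits) : bits_(bits) {}
  uint32_t bits_;
};

int main() {
  // Joining Smi with Number widens the element to Number...
  TypeInfo t = TypeInfo::Combine(TypeInfo::Smi(), TypeInfo::Number());
  assert(t.IsNumber() && !t.IsSmi());
  // ...and joining anything with Unknown forgets everything.
  assert(TypeInfo::Combine(t, TypeInfo::Unknown()).IsUnknown());
  return 0;
}
```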

17
deps/v8/src/ic.cc

@@ -436,7 +436,7 @@ Object* CallIC::LoadFunction(State state,
   }

   // Lookup is valid: Update inline cache and stub cache.
-  if (FLAG_use_ic && lookup.IsLoaded()) {
+  if (FLAG_use_ic) {
     UpdateCaches(&lookup, state, object, name);
   }

@@ -484,7 +484,6 @@ void CallIC::UpdateCaches(LookupResult* lookup,
                           State state,
                           Handle<Object> object,
                           Handle<String> name) {
-  ASSERT(lookup->IsLoaded());
   // Bail out if we didn't find a result.
   if (!lookup->IsProperty() || !lookup->IsCacheable()) return;

@@ -647,7 +646,6 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
       FLAG_use_ic &&
       state == PREMONOMORPHIC &&
       lookup.IsProperty() &&
-      lookup.IsLoaded() &&
       lookup.IsCacheable() &&
       lookup.holder() == *object &&
       lookup.type() == FIELD &&

@@ -669,7 +667,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
   }

   // Update inline cache and stub cache.
-  if (FLAG_use_ic && lookup.IsLoaded()) {
+  if (FLAG_use_ic) {
     UpdateCaches(&lookup, state, object, name);
   }

@@ -695,7 +693,6 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
                           State state,
                           Handle<Object> object,
                           Handle<String> name) {
-  ASSERT(lookup->IsLoaded());
   // Bail out if we didn't find a result.
   if (!lookup->IsProperty() || !lookup->IsCacheable()) return;

@@ -857,7 +854,7 @@ Object* KeyedLoadIC::Load(State state,
     }
   }

-  if (FLAG_use_ic && lookup.IsLoaded()) {
+  if (FLAG_use_ic) {
     UpdateCaches(&lookup, state, object, name);
   }

@@ -912,7 +909,6 @@ Object* KeyedLoadIC::Load(State state,
 void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
                                Handle<Object> object, Handle<String> name) {
-  ASSERT(lookup->IsLoaded());
   // Bail out if we didn't find a result.
   if (!lookup->IsProperty() || !lookup->IsCacheable()) return;

@@ -993,8 +989,6 @@ static bool StoreICableLookup(LookupResult* lookup) {
   // state.
   if (lookup->IsReadOnly()) return false;

-  if (!lookup->IsLoaded()) return false;
-
   return true;
 }

@@ -1073,7 +1067,6 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
                            Handle<JSObject> receiver,
                            Handle<String> name,
                            Handle<Object> value) {
-  ASSERT(lookup->IsLoaded());
   // Skip JSGlobalProxy.
   ASSERT(!receiver->IsJSGlobalProxy());

@@ -1181,7 +1174,7 @@ Object* KeyedStoreIC::Store(State state,
   receiver->LocalLookup(*name, &lookup);

   // Update inline cache and stub cache.
-  if (FLAG_use_ic && lookup.IsLoaded()) {
+  if (FLAG_use_ic) {
     UpdateCaches(&lookup, state, receiver, name, value);
   }

@@ -1215,8 +1208,6 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
                                 Handle<JSObject> receiver,
                                 Handle<String> name,
                                 Handle<Object> value) {
-  ASSERT(lookup->IsLoaded());
-
   // Skip JSGlobalProxy.
   if (receiver->IsJSGlobalProxy()) return;

268
deps/v8/src/json.js

@@ -0,0 +1,268 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
var $JSON = global.JSON;
function ParseJSONUnfiltered(text) {
var s = $String(text);
var f = %CompileString(text, true);
return f();
}
function Revive(holder, name, reviver) {
var val = holder[name];
if (IS_OBJECT(val)) {
if (IS_ARRAY(val)) {
var length = val.length;
for (var i = 0; i < length; i++) {
var newElement = Revive(val, $String(i), reviver);
val[i] = newElement;
}
} else {
for (var p in val) {
if (ObjectHasOwnProperty.call(val, p)) {
var newElement = Revive(val, p, reviver);
if (IS_UNDEFINED(newElement)) {
delete val[p];
} else {
val[p] = newElement;
}
}
}
}
}
return reviver.call(holder, name, val);
}
function JSONParse(text, reviver) {
var unfiltered = ParseJSONUnfiltered(text);
if (IS_FUNCTION(reviver)) {
return Revive({'': unfiltered}, '', reviver);
} else {
return unfiltered;
}
}
var characterQuoteCache = {
'\"': '\\"',
'\\': '\\\\',
'/': '\\/',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
'\x0B': '\\u000b'
};
function QuoteSingleJSONCharacter(c) {
if (c in characterQuoteCache) {
return characterQuoteCache[c];
}
var charCode = c.charCodeAt(0);
var result;
if (charCode < 16) result = '\\u000';
else if (charCode < 256) result = '\\u00';
else if (charCode < 4096) result = '\\u0';
else result = '\\u';
result += charCode.toString(16);
characterQuoteCache[c] = result;
return result;
}
function QuoteJSONString(str) {
var quotable = /[\\\"\x00-\x1f\x80-\uffff]/g;
return '"' + str.replace(quotable, QuoteSingleJSONCharacter) + '"';
}
function StackContains(stack, val) {
var length = stack.length;
for (var i = 0; i < length; i++) {
if (stack[i] === val) {
return true;
}
}
return false;
}
function SerializeArray(value, replacer, stack, indent, gap) {
if (StackContains(stack, value)) {
throw MakeTypeError('circular_structure', []);
}
stack.push(value);
var stepback = indent;
indent += gap;
var partial = [];
var len = value.length;
for (var i = 0; i < len; i++) {
var strP = JSONSerialize($String(i), value, replacer, stack,
indent, gap);
if (IS_UNDEFINED(strP)) {
strP = "null";
}
partial.push(strP);
}
var final;
if (gap == "") {
final = "[" + partial.join(",") + "]";
} else if (partial.length > 0) {
var separator = ",\n" + indent;
final = "[\n" + indent + partial.join(separator) + "\n" +
stepback + "]";
} else {
final = "[]";
}
stack.pop();
return final;
}
function SerializeObject(value, replacer, stack, indent, gap) {
if (StackContains(stack, value)) {
throw MakeTypeError('circular_structure', []);
}
stack.push(value);
var stepback = indent;
indent += gap;
var partial = [];
if (IS_ARRAY(replacer)) {
var length = replacer.length;
for (var i = 0; i < length; i++) {
if (ObjectHasOwnProperty.call(replacer, i)) {
var p = replacer[i];
var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
if (!IS_UNDEFINED(strP)) {
var member = QuoteJSONString(p) + ":";
if (gap != "") member += " ";
member += strP;
partial.push(member);
}
}
}
} else {
for (var p in value) {
if (ObjectHasOwnProperty.call(value, p)) {
var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
if (!IS_UNDEFINED(strP)) {
var member = QuoteJSONString(p) + ":";
if (gap != "") member += " ";
member += strP;
partial.push(member);
}
}
}
}
var final;
if (gap == "") {
final = "{" + partial.join(",") + "}";
} else if (partial.length > 0) {
var separator = ",\n" + indent;
final = "{\n" + indent + partial.join(separator) + "\n" +
stepback + "}";
} else {
final = "{}";
}
stack.pop();
return final;
}
function JSONSerialize(key, holder, replacer, stack, indent, gap) {
var value = holder[key];
if (IS_OBJECT(value) && value) {
var toJSON = value.toJSON;
if (IS_FUNCTION(toJSON)) {
value = toJSON.call(value, key);
}
}
if (IS_FUNCTION(replacer)) {
value = replacer.call(holder, key, value);
}
// Unwrap value if necessary
if (IS_OBJECT(value)) {
if (IS_NUMBER_WRAPPER(value)) {
value = $Number(value);
} else if (IS_STRING_WRAPPER(value)) {
value = $String(value);
} else if (IS_BOOLEAN_WRAPPER(value)) {
value = $Boolean(value);
}
}
switch (typeof value) {
case "string":
return QuoteJSONString(value);
case "object":
if (!value) {
return "null";
} else if (IS_ARRAY(value)) {
return SerializeArray(value, replacer, stack, indent, gap);
} else {
return SerializeObject(value, replacer, stack, indent, gap);
}
case "number":
return $isFinite(value) ? $String(value) : "null";
case "boolean":
return value ? "true" : "false";
}
}
function JSONStringify(value, replacer, space) {
var stack = [];
var indent = "";
if (IS_OBJECT(space)) {
// Unwrap 'space' if it is wrapped
if (IS_NUMBER_WRAPPER(space)) {
space = $Number(space);
} else if (IS_STRING_WRAPPER(space)) {
space = $String(space);
}
}
var gap;
if (IS_NUMBER(space)) {
space = $Math.min(space, 10);
gap = "";
for (var i = 0; i < space; i++) {
gap += " ";
}
} else if (IS_STRING(space)) {
if (space.length > 10) {
gap = space.substring(0, 10);
} else {
gap = space;
}
} else {
gap = "";
}
return JSONSerialize('', {'': value}, replacer, stack, indent, gap);
}
function SetupJSON() {
InstallFunctions($JSON, DONT_ENUM, $Array(
"parse", JSONParse,
"stringify", JSONStringify
));
}
SetupJSON();
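QuoteSingleJSONCharacter above builds zero-padded \uXXXX escapes by selecting a prefix from the character code's magnitude. The same escaping scheme in standalone C++, as a rough sketch (not V8 code; the handling of characters the quotable regexp never matches is approximated):

```cpp
#include <cstdio>
#include <string>

// Sketch of json.js's escaping: a handful of control characters get short
// escapes, and everything else matched by the quotable class becomes a
// zero-padded \uXXXX escape.
std::string QuoteJSONString(const std::wstring& str) {
  std::string out = "\"";
  for (wchar_t c : str) {
    switch (c) {
      case L'"':  out += "\\\""; break;
      case L'\\': out += "\\\\"; break;
      case L'\b': out += "\\b";  break;
      case L'\f': out += "\\f";  break;
      case L'\n': out += "\\n";  break;
      case L'\r': out += "\\r";  break;
      case L'\t': out += "\\t";  break;
      default:
        // Mirrors the class [\x00-\x1f\x80-\uffff]: escape non-printable
        // ASCII and everything above 0x7f.
        if (c < 0x20 || c >= 0x80) {
          char buf[8];
          // %04x gives the same zero padding that the charCode < 16/256/4096
          // prefix selection produces in QuoteSingleJSONCharacter.
          std::snprintf(buf, sizeof(buf), "\\u%04x",
                        static_cast<unsigned>(c) & 0xffff);
          out += buf;
        } else {
          out += static_cast<char>(c);
        }
    }
  }
  return out + "\"";
}

int main() {
  // Prints: "a\tb\u000b\u00e9"
  std::printf("%s\n", QuoteJSONString(L"a\tb\x0B\u00e9").c_str());
  return 0;
}
```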

20
deps/v8/src/jsregexp.cc

@@ -66,11 +66,6 @@ Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<JSFunction> constructor,
                                                Handle<String> pattern,
                                                Handle<String> flags,
                                                bool* has_pending_exception) {
-  // Ensure that the constructor function has been loaded.
-  if (!constructor->IsLoaded()) {
-    LoadLazy(constructor, has_pending_exception);
-    if (*has_pending_exception) return Handle<Object>();
-  }
   // Call the construct code with 2 arguments.
   Object** argv[2] = { Handle<Object>::cast(pattern).location(),
                        Handle<Object>::cast(flags).location() };

@@ -4992,7 +4987,9 @@ int AssertionNode::ComputeFirstCharacterSet(int budget) {
     case AFTER_WORD_CHARACTER: {
       ASSERT_NOT_NULL(on_success());
       budget = on_success()->ComputeFirstCharacterSet(budget);
-      set_first_character_set(on_success()->first_character_set());
+      if (budget >= 0) {
+        set_first_character_set(on_success()->first_character_set());
+      }
       break;
     }
   }

@@ -5018,6 +5015,10 @@ int ActionNode::ComputeFirstCharacterSet(int budget) {
 int BackReferenceNode::ComputeFirstCharacterSet(int budget) {
   // We don't know anything about the first character of a backreference
   // at this point.
+  // The potential first characters are the first characters of the capture,
+  // and the first characters of the on_success node, depending on whether the
+  // capture can be empty and whether it is known to be participating or known
+  // not to be.
   return kComputeFirstCharacterSetFail;
 }

@@ -5037,8 +5038,11 @@ int TextNode::ComputeFirstCharacterSet(int budget) {
     } else {
       ASSERT(text.type == TextElement::CHAR_CLASS);
       RegExpCharacterClass* char_class = text.data.u_char_class;
+      ZoneList<CharacterRange>* ranges = char_class->ranges();
+      // TODO(lrn): Canonicalize ranges when they are created
+      // instead of waiting until now.
+      CharacterRange::Canonicalize(ranges);
       if (char_class->is_negated()) {
-        ZoneList<CharacterRange>* ranges = char_class->ranges();
         int length = ranges->length();
         int new_length = length + 1;
         if (length > 0) {

@@ -5052,7 +5056,7 @@ int TextNode::ComputeFirstCharacterSet(int budget) {
         CharacterRange::Negate(ranges, negated_ranges);
         set_first_character_set(negated_ranges);
       } else {
-        set_first_character_set(char_class->ranges());
+        set_first_character_set(ranges);
       }
     }
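The TextNode hunk above now calls CharacterRange::Canonicalize before negating or publishing the ranges. Canonicalizing a character class means sorting its [from, to] ranges and merging overlapping or adjacent ones, so later passes can rely on the order. A standalone sketch of that operation (not V8's implementation):

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

struct CharacterRange {
  int from;
  int to;
};

// Sort by start, then merge overlapping or adjacent [from, to] intervals.
void Canonicalize(std::vector<CharacterRange>* ranges) {
  if (ranges->size() < 2) return;
  std::sort(ranges->begin(), ranges->end(),
            [](const CharacterRange& a, const CharacterRange& b) {
              return a.from < b.from;
            });
  std::vector<CharacterRange> merged;
  merged.push_back((*ranges)[0]);
  for (size_t i = 1; i < ranges->size(); ++i) {
    CharacterRange& last = merged.back();
    const CharacterRange& next = (*ranges)[i];
    if (next.from <= last.to + 1) {
      last.to = std::max(last.to, next.to);  // overlap or adjacency: extend
    } else {
      merged.push_back(next);
    }
  }
  ranges->swap(merged);
}

int main() {
  // [d-k] + [a-f] + [m-n] canonicalizes to [a-k] + [m-n].
  std::vector<CharacterRange> r = {{'d', 'k'}, {'a', 'f'}, {'m', 'n'}};
  Canonicalize(&r);
  assert(r.size() == 2 && r[0].from == 'a' && r[0].to == 'k');
  return 0;
}
```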

51
deps/v8/src/jump-target-heavy-inl.h

@@ -0,0 +1,51 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_JUMP_TARGET_HEAVY_INL_H_
#define V8_JUMP_TARGET_HEAVY_INL_H_
#include "virtual-frame-inl.h"
namespace v8 {
namespace internal {
void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
FrameElement* element = &entry_frame_->elements_[index];
element->clear_copied();
if (target->is_register()) {
entry_frame_->set_register_location(target->reg(), index);
} else if (target->is_copy()) {
entry_frame_->elements_[target->index()].set_copied();
}
if (direction_ == BIDIRECTIONAL && !target->is_copy()) {
element->set_type_info(TypeInfo::Unknown());
}
}
} } // namespace v8::internal
#endif // V8_JUMP_TARGET_HEAVY_INL_H_

363
deps/v8/src/jump-target-heavy.cc

@@ -0,0 +1,363 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"
namespace v8 {
namespace internal {
void JumpTarget::Jump(Result* arg) {
ASSERT(cgen()->has_valid_frame());
cgen()->frame()->Push(arg);
DoJump();
}
void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
ASSERT(cgen()->has_valid_frame());
// We want to check that non-frame registers at the call site stay in
// the same registers on the fall-through branch.
#ifdef DEBUG
Result::Type arg_type = arg->type();
Register arg_reg = arg->is_register() ? arg->reg() : no_reg;
#endif
cgen()->frame()->Push(arg);
DoBranch(cc, hint);
*arg = cgen()->frame()->Pop();
ASSERT(arg->type() == arg_type);
ASSERT(!arg->is_register() || arg->reg().is(arg_reg));
}
void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
ASSERT(cgen()->has_valid_frame());
// We want to check that non-frame registers at the call site stay in
// the same registers on the fall-through branch.
#ifdef DEBUG
Result::Type arg0_type = arg0->type();
Register arg0_reg = arg0->is_register() ? arg0->reg() : no_reg;
Result::Type arg1_type = arg1->type();
Register arg1_reg = arg1->is_register() ? arg1->reg() : no_reg;
#endif
cgen()->frame()->Push(arg0);
cgen()->frame()->Push(arg1);
DoBranch(cc, hint);
*arg1 = cgen()->frame()->Pop();
*arg0 = cgen()->frame()->Pop();
ASSERT(arg0->type() == arg0_type);
ASSERT(!arg0->is_register() || arg0->reg().is(arg0_reg));
ASSERT(arg1->type() == arg1_type);
ASSERT(!arg1->is_register() || arg1->reg().is(arg1_reg));
}
void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
ASSERT(cgen()->has_valid_frame());
int count = cgen()->frame()->height() - expected_height_;
if (count > 0) {
// We negate and branch here rather than using DoBranch's negate
// and branch. This gives us a hook to remove statement state
// from the frame.
JumpTarget fall_through;
// Branch to fall through will not negate, because it is a
// forward-only target.
fall_through.Branch(NegateCondition(cc), NegateHint(hint));
Jump(arg); // May emit merge code here.
fall_through.Bind();
} else {
#ifdef DEBUG
Result::Type arg_type = arg->type();
Register arg_reg = arg->is_register() ? arg->reg() : no_reg;
#endif
cgen()->frame()->Push(arg);
DoBranch(cc, hint);
*arg = cgen()->frame()->Pop();
ASSERT(arg->type() == arg_type);
ASSERT(!arg->is_register() || arg->reg().is(arg_reg));
}
}
void JumpTarget::Bind(Result* arg) {
if (cgen()->has_valid_frame()) {
cgen()->frame()->Push(arg);
}
DoBind();
*arg = cgen()->frame()->Pop();
}
void JumpTarget::Bind(Result* arg0, Result* arg1) {
if (cgen()->has_valid_frame()) {
cgen()->frame()->Push(arg0);
cgen()->frame()->Push(arg1);
}
DoBind();
*arg1 = cgen()->frame()->Pop();
*arg0 = cgen()->frame()->Pop();
}
void JumpTarget::ComputeEntryFrame() {
// Given: a collection of frames reaching by forward CFG edges and
// the directionality of the block. Compute: an entry frame for the
// block.
Counters::compute_entry_frame.Increment();
#ifdef DEBUG
if (compiling_deferred_code_) {
ASSERT(reaching_frames_.length() > 1);
VirtualFrame* frame = reaching_frames_[0];
bool all_identical = true;
for (int i = 1; i < reaching_frames_.length(); i++) {
if (!frame->Equals(reaching_frames_[i])) {
all_identical = false;
break;
}
}
ASSERT(!all_identical || all_identical);
}
#endif
// Choose an initial frame.
VirtualFrame* initial_frame = reaching_frames_[0];
// A list of pointers to frame elements in the entry frame. NULL
// indicates that the element has not yet been determined.
int length = initial_frame->element_count();
ZoneList<FrameElement*> elements(length);
// Initially populate the list of elements based on the initial
// frame.
for (int i = 0; i < length; i++) {
FrameElement element = initial_frame->elements_[i];
// We do not allow copies or constants in bidirectional frames.
if (direction_ == BIDIRECTIONAL) {
if (element.is_constant() || element.is_copy()) {
elements.Add(NULL);
continue;
}
}
elements.Add(&initial_frame->elements_[i]);
}
// Compute elements based on the other reaching frames.
if (reaching_frames_.length() > 1) {
for (int i = 0; i < length; i++) {
FrameElement* element = elements[i];
for (int j = 1; j < reaching_frames_.length(); j++) {
// Element computation is monotonic: new information will not
// change our decision about undetermined or invalid elements.
if (element == NULL || !element->is_valid()) break;
FrameElement* other = &reaching_frames_[j]->elements_[i];
element = element->Combine(other);
if (element != NULL && !element->is_copy()) {
ASSERT(other != NULL);
// We overwrite the number information of one of the incoming frames.
// This is safe because we only use the frame for emitting merge code.
// The number information of incoming frames is not used anymore.
element->set_type_info(TypeInfo::Combine(element->type_info(),
other->type_info()));
}
}
elements[i] = element;
}
}
// Build the new frame. A freshly allocated frame has memory elements
// for the parameters and some platform-dependent elements (e.g.,
// return address). Replace those first.
entry_frame_ = new VirtualFrame();
int index = 0;
for (; index < entry_frame_->element_count(); index++) {
FrameElement* target = elements[index];
// If the element is determined, set it now. Count registers. Mark
// elements as copied exactly when they have a copy. Undetermined
// elements are initially recorded as if in memory.
if (target != NULL) {
entry_frame_->elements_[index] = *target;
InitializeEntryElement(index, target);
}
}
// Then fill in the rest of the frame with new elements.
for (; index < length; index++) {
FrameElement* target = elements[index];
if (target == NULL) {
entry_frame_->elements_.Add(
FrameElement::MemoryElement(TypeInfo::Uninitialized()));
} else {
entry_frame_->elements_.Add(*target);
InitializeEntryElement(index, target);
}
}
// Allocate any still-undetermined frame elements to registers or
// memory, from the top down.
for (int i = length - 1; i >= 0; i--) {
if (elements[i] == NULL) {
// Loop over all the reaching frames to check whether the element
// is synced on all frames and to count the registers it occupies.
bool is_synced = true;
RegisterFile candidate_registers;
int best_count = kMinInt;
int best_reg_num = RegisterAllocator::kInvalidRegister;
TypeInfo info = TypeInfo::Uninitialized();
for (int j = 0; j < reaching_frames_.length(); j++) {
FrameElement element = reaching_frames_[j]->elements_[i];
if (direction_ == BIDIRECTIONAL) {
info = TypeInfo::Unknown();
} else if (!element.is_copy()) {
info = TypeInfo::Combine(info, element.type_info());
} else {
// New elements will not be copies, so get number information from
// backing element in the reaching frame.
info = TypeInfo::Combine(info,
reaching_frames_[j]->elements_[element.index()].type_info());
}
is_synced = is_synced && element.is_synced();
if (element.is_register() && !entry_frame_->is_used(element.reg())) {
// Count the register occurrence and remember it if better
// than the previous best.
int num = RegisterAllocator::ToNumber(element.reg());
candidate_registers.Use(num);
if (candidate_registers.count(num) > best_count) {
best_count = candidate_registers.count(num);
best_reg_num = num;
}
}
}
// We must have a number type information now (not for copied elements).
ASSERT(entry_frame_->elements_[i].is_copy()
|| !info.IsUninitialized());
// If the value is synced on all frames, put it in memory. This
// costs nothing at the merge code but will incur a
// memory-to-register move when the value is needed later.
if (is_synced) {
// Already recorded as a memory element.
// Set combined number info.
entry_frame_->elements_[i].set_type_info(info);
continue;
}
// Try to put it in a register. If there was no best choice
// consider any free register.
if (best_reg_num == RegisterAllocator::kInvalidRegister) {
for (int j = 0; j < RegisterAllocator::kNumRegisters; j++) {
if (!entry_frame_->is_used(j)) {
best_reg_num = j;
break;
}
}
}
if (best_reg_num != RegisterAllocator::kInvalidRegister) {
// If there was a register choice, use it. Preserve the copied
// flag on the element.
bool is_copied = entry_frame_->elements_[i].is_copied();
Register reg = RegisterAllocator::ToRegister(best_reg_num);
entry_frame_->elements_[i] =
FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED,
TypeInfo::Uninitialized());
if (is_copied) entry_frame_->elements_[i].set_copied();
entry_frame_->set_register_location(reg, i);
}
// Set combined number info.
entry_frame_->elements_[i].set_type_info(info);
}
}
// If we have incoming backward edges assert we forget all number information.
#ifdef DEBUG
if (direction_ == BIDIRECTIONAL) {
for (int i = 0; i < length; ++i) {
if (!entry_frame_->elements_[i].is_copy()) {
ASSERT(entry_frame_->elements_[i].type_info().IsUnknown());
}
}
}
#endif
// The stack pointer is at the highest synced element or the base of
// the expression stack.
int stack_pointer = length - 1;
while (stack_pointer >= entry_frame_->expression_base_index() &&
!entry_frame_->elements_[stack_pointer].is_synced()) {
stack_pointer--;
}
entry_frame_->stack_pointer_ = stack_pointer;
}
DeferredCode::DeferredCode()
: masm_(CodeGeneratorScope::Current()->masm()),
statement_position_(masm_->current_statement_position()),
position_(masm_->current_position()) {
ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition);
CodeGeneratorScope::Current()->AddDeferred(this);
#ifdef DEBUG
comment_ = "";
#endif
// Copy the register locations from the code generator's frame.
// These are the registers that will be spilled on entry to the
// deferred code and restored on exit.
VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
int sp_offset = frame->fp_relative(frame->stack_pointer_);
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
int loc = frame->register_location(i);
if (loc == VirtualFrame::kIllegalIndex) {
registers_[i] = kIgnore;
} else if (frame->elements_[loc].is_synced()) {
// Needs to be restored on exit but not saved on entry.
registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
} else {
int offset = frame->fp_relative(loc);
registers_[i] = (offset < sp_offset) ? kPush : offset;
}
}
}
} } // namespace v8::internal

19
deps/v8/src/jump-target-inl.h

@@ -30,6 +30,12 @@

 #include "virtual-frame-inl.h"

+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+#include "jump-target-heavy-inl.h"
+#else
+#include "jump-target-light-inl.h"
+#endif
+
 namespace v8 {
 namespace internal {

@@ -37,19 +43,6 @@ CodeGenerator* JumpTarget::cgen() {
   return CodeGeneratorScope::Current();
 }

-void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
-  FrameElement* element = &entry_frame_->elements_[index];
-  element->clear_copied();
-  if (target->is_register()) {
-    entry_frame_->set_register_location(target->reg(), index);
-  } else if (target->is_copy()) {
-    entry_frame_->elements_[target->index()].set_copied();
-  }
-  if (direction_ == BIDIRECTIONAL && !target->is_copy()) {
-    element->set_number_info(NumberInfo::Unknown());
-  }
-}
-
 } }  // namespace v8::internal

 #endif  // V8_JUMP_TARGET_INL_H_

42
deps/v8/src/jump-target-light-inl.h

@@ -0,0 +1,42 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_JUMP_TARGET_LIGHT_INL_H_
#define V8_JUMP_TARGET_LIGHT_INL_H_
#include "virtual-frame-inl.h"
namespace v8 {
namespace internal {
void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
UNIMPLEMENTED();
}
} } // namespace v8::internal
#endif // V8_JUMP_TARGET_LIGHT_INL_H_

99
deps/v8/src/jump-target-light.cc

@@ -0,0 +1,99 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "jump-target-inl.h"
namespace v8 {
namespace internal {
void JumpTarget::Jump(Result* arg) {
UNIMPLEMENTED();
}
void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
UNIMPLEMENTED();
}
void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
UNIMPLEMENTED();
}
void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
UNIMPLEMENTED();
}
void JumpTarget::Bind(Result* arg) {
UNIMPLEMENTED();
}
void JumpTarget::Bind(Result* arg0, Result* arg1) {
UNIMPLEMENTED();
}
void JumpTarget::ComputeEntryFrame() {
UNIMPLEMENTED();
}
DeferredCode::DeferredCode()
: masm_(CodeGeneratorScope::Current()->masm()),
statement_position_(masm_->current_statement_position()),
position_(masm_->current_position()) {
ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition);
CodeGeneratorScope::Current()->AddDeferred(this);
#ifdef DEBUG
comment_ = "";
#endif
// Copy the register locations from the code generator's frame.
// These are the registers that will be spilled on entry to the
// deferred code and restored on exit.
VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
int loc = frame->register_location(i);
if (loc == VirtualFrame::kIllegalIndex) {
registers_[i] = kIgnore;
} else {
// Needs to be restored on exit but not saved on entry.
registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
}
}
}
} } // namespace v8::internal

268
deps/v8/src/jump-target.cc

@@ -48,289 +48,21 @@ void JumpTarget::Unuse() {
 }
void JumpTarget::ComputeEntryFrame() {
// Given: a collection of frames reaching by forward CFG edges and
// the directionality of the block. Compute: an entry frame for the
// block.
Counters::compute_entry_frame.Increment();
#ifdef DEBUG
if (compiling_deferred_code_) {
ASSERT(reaching_frames_.length() > 1);
VirtualFrame* frame = reaching_frames_[0];
bool all_identical = true;
for (int i = 1; i < reaching_frames_.length(); i++) {
if (!frame->Equals(reaching_frames_[i])) {
all_identical = false;
break;
}
}
ASSERT(!all_identical || all_identical);
}
#endif
// Choose an initial frame.
VirtualFrame* initial_frame = reaching_frames_[0];
// A list of pointers to frame elements in the entry frame. NULL
// indicates that the element has not yet been determined.
int length = initial_frame->element_count();
ZoneList<FrameElement*> elements(length);
// Initially populate the list of elements based on the initial
// frame.
for (int i = 0; i < length; i++) {
FrameElement element = initial_frame->elements_[i];
// We do not allow copies or constants in bidirectional frames.
if (direction_ == BIDIRECTIONAL) {
if (element.is_constant() || element.is_copy()) {
elements.Add(NULL);
continue;
}
}
elements.Add(&initial_frame->elements_[i]);
}
// Compute elements based on the other reaching frames.
if (reaching_frames_.length() > 1) {
for (int i = 0; i < length; i++) {
FrameElement* element = elements[i];
for (int j = 1; j < reaching_frames_.length(); j++) {
// Element computation is monotonic: new information will not
// change our decision about undetermined or invalid elements.
if (element == NULL || !element->is_valid()) break;
FrameElement* other = &reaching_frames_[j]->elements_[i];
element = element->Combine(other);
if (element != NULL && !element->is_copy()) {
ASSERT(other != NULL);
// We overwrite the number information of one of the incoming frames.
// This is safe because we only use the frame for emitting merge code.
// The number information of incoming frames is not used anymore.
element->set_number_info(NumberInfo::Combine(element->number_info(),
other->number_info()));
}
}
elements[i] = element;
}
}
// Build the new frame. A freshly allocated frame has memory elements
// for the parameters and some platform-dependent elements (e.g.,
// return address). Replace those first.
entry_frame_ = new VirtualFrame();
int index = 0;
for (; index < entry_frame_->element_count(); index++) {
FrameElement* target = elements[index];
// If the element is determined, set it now. Count registers. Mark
// elements as copied exactly when they have a copy. Undetermined
// elements are initially recorded as if in memory.
if (target != NULL) {
entry_frame_->elements_[index] = *target;
InitializeEntryElement(index, target);
}
}
// Then fill in the rest of the frame with new elements.
for (; index < length; index++) {
FrameElement* target = elements[index];
if (target == NULL) {
entry_frame_->elements_.Add(
FrameElement::MemoryElement(NumberInfo::Uninitialized()));
} else {
entry_frame_->elements_.Add(*target);
InitializeEntryElement(index, target);
}
}
// Allocate any still-undetermined frame elements to registers or
// memory, from the top down.
for (int i = length - 1; i >= 0; i--) {
if (elements[i] == NULL) {
// Loop over all the reaching frames to check whether the element
// is synced on all frames and to count the registers it occupies.
bool is_synced = true;
RegisterFile candidate_registers;
int best_count = kMinInt;
int best_reg_num = RegisterAllocator::kInvalidRegister;
NumberInfo info = NumberInfo::Uninitialized();
for (int j = 0; j < reaching_frames_.length(); j++) {
FrameElement element = reaching_frames_[j]->elements_[i];
if (direction_ == BIDIRECTIONAL) {
info = NumberInfo::Unknown();
} else if (!element.is_copy()) {
info = NumberInfo::Combine(info, element.number_info());
} else {
// New elements will not be copies, so get number information from
// backing element in the reaching frame.
info = NumberInfo::Combine(info,
reaching_frames_[j]->elements_[element.index()].number_info());
}
is_synced = is_synced && element.is_synced();
if (element.is_register() && !entry_frame_->is_used(element.reg())) {
// Count the register occurrence and remember it if better
// than the previous best.
int num = RegisterAllocator::ToNumber(element.reg());
candidate_registers.Use(num);
if (candidate_registers.count(num) > best_count) {
best_count = candidate_registers.count(num);
best_reg_num = num;
}
}
}
// We must have a number type information now (not for copied elements).
ASSERT(entry_frame_->elements_[i].is_copy()
|| !info.IsUninitialized());
// If the value is synced on all frames, put it in memory. This
// costs nothing at the merge code but will incur a
// memory-to-register move when the value is needed later.
if (is_synced) {
// Already recorded as a memory element.
// Set combined number info.
entry_frame_->elements_[i].set_number_info(info);
continue;
}
// Try to put it in a register. If there was no best choice
// consider any free register.
if (best_reg_num == RegisterAllocator::kInvalidRegister) {
for (int j = 0; j < RegisterAllocator::kNumRegisters; j++) {
if (!entry_frame_->is_used(j)) {
best_reg_num = j;
break;
}
}
}
if (best_reg_num != RegisterAllocator::kInvalidRegister) {
// If there was a register choice, use it. Preserve the copied
// flag on the element.
bool is_copied = entry_frame_->elements_[i].is_copied();
Register reg = RegisterAllocator::ToRegister(best_reg_num);
entry_frame_->elements_[i] =
FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED,
NumberInfo::Uninitialized());
if (is_copied) entry_frame_->elements_[i].set_copied();
entry_frame_->set_register_location(reg, i);
}
// Set combined number info.
entry_frame_->elements_[i].set_number_info(info);
}
}
// If we have incoming backward edges assert we forget all number information.
#ifdef DEBUG
if (direction_ == BIDIRECTIONAL) {
for (int i = 0; i < length; ++i) {
if (!entry_frame_->elements_[i].is_copy()) {
ASSERT(entry_frame_->elements_[i].number_info().IsUnknown());
}
}
}
#endif
// The stack pointer is at the highest synced element or the base of
// the expression stack.
int stack_pointer = length - 1;
while (stack_pointer >= entry_frame_->expression_base_index() &&
!entry_frame_->elements_[stack_pointer].is_synced()) {
stack_pointer--;
}
entry_frame_->stack_pointer_ = stack_pointer;
}
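For each still-undetermined slot, the code above runs a per-slot vote: the register that holds that element in the most reaching frames, and is still free in the entry frame, wins, with memory as the fallback. A minimal standalone sketch of that vote (not V8 code), assuming a simple register file:

  #include <vector>

  const int kNumRegisters = 8;
  const int kInvalidRegister = -1;

  int BestRegisterForSlot(const std::vector<int>& reg_per_frame,
                          const bool* in_use) {
    int counts[kNumRegisters] = {0};
    int best_count = 0;
    int best_reg = kInvalidRegister;
    for (int reg : reg_per_frame) {  // register of this slot in each frame
      if (reg == kInvalidRegister || in_use[reg]) continue;
      if (++counts[reg] > best_count) {
        best_count = counts[reg];
        best_reg = reg;
      }
    }
    return best_reg;
  }

  int main() {
    bool in_use[kNumRegisters] = {false};
    // Slot held in r3 by two frames and in r5 by one: r3 wins the vote.
    std::vector<int> regs = {3, 5, 3};
    return BestRegisterForSlot(regs, in_use) == 3 ? 0 : 1;
  }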
void JumpTarget::Jump() {
DoJump();
}
void JumpTarget::Jump(Result* arg) {
ASSERT(cgen()->has_valid_frame());
cgen()->frame()->Push(arg);
DoJump();
}
void JumpTarget::Branch(Condition cc, Hint hint) {
DoBranch(cc, hint);
}
#ifdef DEBUG
#define DECLARE_ARGCHECK_VARS(name) \
Result::Type name##_type = name->type(); \
Register name##_reg = name->is_register() ? name->reg() : no_reg
#define ASSERT_ARGCHECK(name) \
ASSERT(name->type() == name##_type); \
ASSERT(!name->is_register() || name->reg().is(name##_reg))
#else
#define DECLARE_ARGCHECK_VARS(name) do {} while (false)
#define ASSERT_ARGCHECK(name) do {} while (false)
#endif
void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
ASSERT(cgen()->has_valid_frame());
// We want to check that non-frame registers at the call site stay in
// the same registers on the fall-through branch.
DECLARE_ARGCHECK_VARS(arg);
cgen()->frame()->Push(arg);
DoBranch(cc, hint);
*arg = cgen()->frame()->Pop();
ASSERT_ARGCHECK(arg);
}
void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
ASSERT(cgen()->has_valid_frame());
int count = cgen()->frame()->height() - expected_height_;
if (count > 0) {
// We negate and branch here rather than using DoBranch's negate
// and branch. This gives us a hook to remove statement state
// from the frame.
JumpTarget fall_through;
// Branch to fall through will not negate, because it is a
// forward-only target.
fall_through.Branch(NegateCondition(cc), NegateHint(hint));
Jump(arg); // May emit merge code here.
fall_through.Bind();
} else {
DECLARE_ARGCHECK_VARS(arg);
cgen()->frame()->Push(arg);
DoBranch(cc, hint);
*arg = cgen()->frame()->Pop();
ASSERT_ARGCHECK(arg);
}
}
#undef DECLARE_ARGCHECK_VARS
#undef ASSERT_ARGCHECK
void JumpTarget::Bind() {
DoBind();
}
void JumpTarget::Bind(Result* arg) {
if (cgen()->has_valid_frame()) {
cgen()->frame()->Push(arg);
}
DoBind();
*arg = cgen()->frame()->Pop();
}
void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
ASSERT(reaching_frames_.length() == merge_labels_.length());
ASSERT(entry_frame_ == NULL);

5
deps/v8/src/jump-target.h

@ -117,12 +117,17 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
// the target and the fall-through.
virtual void Branch(Condition cc, Hint hint = no_hint);
virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
virtual void Branch(Condition cc,
Result* arg0,
Result* arg1,
Hint hint = no_hint);
// Bind a jump target. If there is no current frame at the binding
// site, there must be at least one frame reaching via a forward
// jump.
virtual void Bind();
virtual void Bind(Result* arg);
virtual void Bind(Result* arg0, Result* arg1);
// Emit a call to a jump target. There must be a current frame at
// the call. The frame at the target is the same as the current

431
deps/v8/src/liveedit-debugger.js

@ -0,0 +1,431 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// LiveEdit feature implementation. The script should be executed after
// debug-debugger.js.
// Changes script text and recompiles all relevant functions if possible.
// The change is always a substring (change_pos, change_pos + change_len)
// being replaced with a completely different string new_str.
//
// Only one function will have its Code changed in result of this function.
// All nested functions (should they have any instances at the moment) are left
// unchanged and re-linked to a newly created script instance representing old
// version of the source. (Generally speaking,
// during the change all nested functions are erased and a completely
// different set of nested functions is introduced.) All other functions
// just have their positions updated.
//
// @param {Script} script that is being changed
// @param {Array} change_log a list that collects engineer-readable description
// of what happened.
Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
change_log) {
// So far the function works as a namespace.
var liveedit = Debug.LiveEditChangeScript;
var Assert = liveedit.Assert;
// Fully compiles source string as a script. Returns Array of
// FunctionCompileInfo -- a descriptions of all functions of the script.
// Elements of array are ordered by start positions of functions (from top
// to bottom) in the source. Fields outer_index and next_sibling_index help
// to navigate the nesting structure of functions.
//
// The script is used for compilation, because it produces code that
// needs to be linked with some particular script (for nested functions).
function DebugGatherCompileInfo(source) {
// Get function info; elements are partially sorted (it is a tree
// of nested functions serialized as parent followed by serialized children).
var raw_compile_info = %LiveEditGatherCompileInfo(script, source);
// Sort function infos by start position field.
var compile_info = new Array();
var old_index_map = new Array();
for (var i = 0; i < raw_compile_info.length; i++) {
compile_info.push(new liveedit.FunctionCompileInfo(raw_compile_info[i]));
old_index_map.push(i);
}
for (var i = 0; i < compile_info.length; i++) {
var k = i;
for (var j = i + 1; j < compile_info.length; j++) {
if (compile_info[k].start_position > compile_info[j].start_position) {
k = j;
}
}
if (k != i) {
var temp_info = compile_info[k];
var temp_index = old_index_map[k];
compile_info[k] = compile_info[i];
old_index_map[k] = old_index_map[i];
compile_info[i] = temp_info;
old_index_map[i] = temp_index;
}
}
// After sorting, update the outer_index field using old_index_map. Also
// set the next_sibling_index field.
var current_index = 0;
// The recursive function that goes over all children of a particular
// node (i.e. function info).
function ResetIndexes(new_parent_index, old_parent_index) {
var previous_sibling = -1;
while (current_index < compile_info.length &&
compile_info[current_index].outer_index == old_parent_index) {
var saved_index = current_index;
compile_info[saved_index].outer_index = new_parent_index;
if (previous_sibling != -1) {
compile_info[previous_sibling].next_sibling_index = saved_index;
}
previous_sibling = saved_index;
current_index++;
ResetIndexes(saved_index, old_index_map[saved_index]);
}
if (previous_sibling != -1) {
compile_info[previous_sibling].next_sibling_index = -1;
}
}
ResetIndexes(-1, -1);
Assert(current_index == compile_info.length);
return compile_info;
}
// Given a position, finds the function that fully includes the entire change.
function FindChangedFunction(compile_info, offset, len) {
// First condition: function should start before the change region.
// Function #0 (whole-script function) always does, but we want
// one that is later in this list.
var index = 0;
while (index + 1 < compile_info.length &&
compile_info[index + 1].start_position <= offset) {
index++;
}
// Now we are at the last function that begins before the change
// region. The function that covers entire change region is either
// this function or the enclosing one.
for (; compile_info[index].end_position < offset + len;
index = compile_info[index].outer_index) {
Assert(index != -1);
}
return index;
}
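FindChangedFunction leans on two invariants established earlier: compile_info is sorted by start position, and outer_index points at the enclosing function (-1 above the whole-script function #0). A standalone C++ sketch of the same search under those assumptions:

  #include <cassert>
  #include <vector>

  // infos sorted by start position; outer is the index of the enclosing
  // function, -1 above the whole-script function #0.
  struct Info { int start; int end; int outer; };

  int FindChangedFunction(const std::vector<Info>& info, int offset, int len) {
    // Last function that starts at or before the change.
    size_t index = 0;
    while (index + 1 < info.size() && info[index + 1].start <= offset) index++;
    // Walk outward until the candidate also ends at or after the change end.
    int i = static_cast<int>(index);
    while (info[i].end < offset + len) {
      i = info[i].outer;
      assert(i != -1);  // function #0 spans the whole script
    }
    return i;
  }

  int main() {
    std::vector<Info> info = { {0, 100, -1}, {10, 40, 0} };
    assert(FindChangedFunction(info, 20, 5) == 1);   // inside the nested fn
    assert(FindChangedFunction(info, 20, 50) == 0);  // spans past its end
    return 0;
  }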
// Variable forward declarations. Preprocessor "Minifier" needs them.
var old_compile_info;
var shared_infos;
// Finds the SharedFunctionInfo that corresponds to the compile info with
// the given index in the old version of the script.
function FindFunctionInfo(index) {
var old_info = old_compile_info[index];
for (var i = 0; i < shared_infos.length; i++) {
var info = shared_infos[i];
if (info.start_position == old_info.start_position &&
info.end_position == old_info.end_position) {
return info;
}
}
}
// Replaces function's Code.
function PatchCode(new_info, shared_info) {
%LiveEditReplaceFunctionCode(new_info.raw_array, shared_info.raw_array);
change_log.push( {function_patched: new_info.function_name} );
}
var change_len_old;
var change_len_new;
// Translate position in old version of script into position in new
// version of script.
function PosTranslator(old_pos) {
if (old_pos <= change_pos) {
return old_pos;
}
if (old_pos >= change_pos + change_len_old) {
return old_pos + change_len_new - change_len_old;
}
return -1;
}
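PosTranslator captures the whole position-patching rule in three cases: positions before the edit stay put, positions after it shift by the length delta, and positions inside it have no counterpart. A standalone sketch with a worked example:

  #include <cassert>

  // Map a position in the old source to the new source after replacing
  // [change_pos, change_pos + len_old) with a string of length len_new.
  int TranslatePos(int old_pos, int change_pos, int len_old, int len_new) {
    if (old_pos <= change_pos) return old_pos;        // before the edit
    if (old_pos >= change_pos + len_old)              // after the edit
      return old_pos + len_new - len_old;
    return -1;                                        // inside the edit
  }

  int main() {
    // Replace 3 chars at offset 10 with 5 chars: later positions shift by +2.
    assert(TranslatePos(4, 10, 3, 5) == 4);
    assert(TranslatePos(20, 10, 3, 5) == 22);
    assert(TranslatePos(11, 10, 3, 5) == -1);
    return 0;
  }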
var position_change_array;
var position_patch_report;
function PatchPositions(new_info, shared_info) {
if (!shared_info) {
// TODO: explain what is happening.
return;
}
%LiveEditPatchFunctionPositions(shared_info.raw_array,
position_change_array);
position_patch_report.push( { name: new_info.function_name } );
}
var link_to_old_script_report;
var old_script;
// Makes a function associated with another instance of a script (the
// one representing its old version). This way the function still
// may access its own text.
function LinkToOldScript(shared_info) {
%LiveEditRelinkFunctionToScript(shared_info.raw_array, old_script);
link_to_old_script_report.push( { name: shared_info.function_name } );
}
var old_source = script.source;
var change_len_old = change_len;
var change_len_new = new_str.length;
// Prepare new source string.
var new_source = old_source.substring(0, change_pos) +
new_str + old_source.substring(change_pos + change_len);
// Find all SharedFunctionInfo's that are compiled from this script.
var shared_raw_list = %LiveEditFindSharedFunctionInfosForScript(script);
var shared_infos = new Array();
for (var i = 0; i < shared_raw_list.length; i++) {
shared_infos.push(new liveedit.SharedInfoWrapper(shared_raw_list[i]));
}
// Gather compile information about old version of script.
var old_compile_info = DebugGatherCompileInfo(old_source);
// Gather compile information about new version of script.
var new_compile_info;
try {
new_compile_info = DebugGatherCompileInfo(new_source);
} catch (e) {
throw new liveedit.Failure("Failed to compile new version of script: " + e);
}
// The index of the single function that is going to have its code replaced.
var function_being_patched =
FindChangedFunction(old_compile_info, change_pos, change_len_old);
// In the old and new script versions the function with the change should
// have the same index.
var function_being_patched2 =
FindChangedFunction(new_compile_info, change_pos, change_len_new);
Assert(function_being_patched == function_being_patched2,
"inconsistent old/new compile info");
// Check that function being patched has the same expectations in a new
// version. Otherwise we cannot safely patch its behavior and should
// choose the outer function instead.
while (!liveedit.CompareFunctionExpectations(
old_compile_info[function_being_patched],
new_compile_info[function_being_patched])) {
Assert(old_compile_info[function_being_patched].outer_index ==
new_compile_info[function_being_patched].outer_index);
function_being_patched =
old_compile_info[function_being_patched].outer_index;
Assert(function_being_patched != -1);
}
// Check that function being patched is not currently on stack.
liveedit.CheckStackActivations(
[ FindFunctionInfo(function_being_patched) ], change_log );
// Committing all changes.
var old_script_name = liveedit.CreateNameForOldScript(script);
// Update the script text and create a new script representing an old
// version of the script.
var old_script = %LiveEditReplaceScript(script, new_source, old_script_name);
PatchCode(new_compile_info[function_being_patched],
FindFunctionInfo(function_being_patched));
var position_patch_report = new Array();
change_log.push( {position_patched: position_patch_report} );
var position_change_array = [ change_pos,
change_pos + change_len_old,
change_pos + change_len_new ];
// Update positions of all outer functions (i.e. all functions, that
// are partially below the function being patched).
for (var i = new_compile_info[function_being_patched].outer_index;
i != -1;
i = new_compile_info[i].outer_index) {
PatchPositions(new_compile_info[i], FindFunctionInfo(i));
}
// Update positions of all functions that are fully below the function
// being patched.
var old_next_sibling =
old_compile_info[function_being_patched].next_sibling_index;
var new_next_sibling =
new_compile_info[function_being_patched].next_sibling_index;
// We simply go over the tail of both old and new lists. Their tails should
// have an identical structure.
if (old_next_sibling == -1) {
Assert(new_next_sibling == -1);
} else {
Assert(old_compile_info.length - old_next_sibling ==
new_compile_info.length - new_next_sibling);
for (var i = old_next_sibling, j = new_next_sibling;
i < old_compile_info.length; i++, j++) {
PatchPositions(new_compile_info[j], FindFunctionInfo(i));
}
}
var link_to_old_script_report = new Array();
change_log.push( { linked_to_old_script: link_to_old_script_report } );
// We need to link all former nested functions to the old script.
for (var i = function_being_patched + 1; i < old_next_sibling; i++) {
LinkToOldScript(FindFunctionInfo(i), old_script);
}
}
Debug.LiveEditChangeScript.Assert = function(condition, message) {
if (!condition) {
if (message) {
throw "Assert " + message;
} else {
throw "Assert";
}
}
}
// An object describing function compilation details. Its index fields
// apply to indexes inside the array that stores these objects.
Debug.LiveEditChangeScript.FunctionCompileInfo = function(raw_array) {
this.function_name = raw_array[0];
this.start_position = raw_array[1];
this.end_position = raw_array[2];
this.param_num = raw_array[3];
this.code = raw_array[4];
this.scope_info = raw_array[5];
this.outer_index = raw_array[6];
this.next_sibling_index = null;
this.raw_array = raw_array;
}
// A structure describing SharedFunctionInfo.
Debug.LiveEditChangeScript.SharedInfoWrapper = function(raw_array) {
this.function_name = raw_array[0];
this.start_position = raw_array[1];
this.end_position = raw_array[2];
this.info = raw_array[3];
this.raw_array = raw_array;
}
// Adds a suffix to the script name to mark that it is an old version.
Debug.LiveEditChangeScript.CreateNameForOldScript = function(script) {
// TODO(635): try better than this; support several changes.
return script.name + " (old)";
}
// Compares the old and new versions of a function's interface to determine
// whether it changed.
Debug.LiveEditChangeScript.CompareFunctionExpectations =
function(function_info1, function_info2) {
// Check that the function has the same number of parameters (there may be
// an adapter that won't survive a change in the parameter count).
if (function_info1.param_num != function_info2.param_num) {
return false;
}
var scope_info1 = function_info1.scope_info;
var scope_info2 = function_info2.scope_info;
if (!scope_info1) {
return !scope_info2;
}
if (scope_info1.length != scope_info2.length) {
return false;
}
// Check that outer scope structure is not changed. Otherwise the function
// will not properly work with existing scopes.
return scope_info1.toString() == scope_info2.toString();
}
// For an array of wrapped shared function infos, checks that none of them
// have activations on the stack (of any thread). Throws a Failure exception
// if this proves to be false.
Debug.LiveEditChangeScript.CheckStackActivations = function(shared_wrapper_list,
change_log) {
var liveedit = Debug.LiveEditChangeScript;
var shared_list = new Array();
for (var i = 0; i < shared_wrapper_list.length; i++) {
shared_list[i] = shared_wrapper_list[i].info;
}
var result = %LiveEditCheckStackActivations(shared_list);
var problems = new Array();
for (var i = 0; i < shared_list.length; i++) {
if (result[i] == liveedit.FunctionPatchabilityStatus.FUNCTION_BLOCKED_ON_STACK) {
var shared = shared_list[i];
var description = {
name: shared.function_name,
start_pos: shared.start_position,
end_pos: shared.end_position
};
problems.push(description);
}
}
if (problems.length > 0) {
change_log.push( { functions_on_stack: problems } );
throw new liveedit.Failure("Blocked by functions on stack");
}
}
// A copy of the FunctionPatchabilityStatus enum from liveedit.h
Debug.LiveEditChangeScript.FunctionPatchabilityStatus = {
FUNCTION_AVAILABLE_FOR_PATCH: 0,
FUNCTION_BLOCKED_ON_STACK: 1
}
// A logical failure in the liveedit process. This means that change_log
// is a valid and consistent description of what happened.
Debug.LiveEditChangeScript.Failure = function(message) {
this.message = message;
}
Debug.LiveEditChangeScript.Failure.prototype.toString = function() {
return "LiveEdit Failure: " + this.message;
}
// A testing entry.
Debug.LiveEditChangeScript.GetPcFromSourcePos = function(func, source_pos) {
return %GetFunctionCodePositionFromSource(func, source_pos);
}

2
deps/v8/src/liveedit.h

@ -91,7 +91,7 @@ class LiveEdit : AllStatic {
static void PatchFunctionPositions(Handle<JSArray> shared_info_array,
Handle<JSArray> position_change_array);
-// A copy of this is in liveedit-delay.js.
+// A copy of this is in liveedit-debugger.js.
enum FunctionPatchabilityStatus {
FUNCTION_AVAILABLE_FOR_PATCH = 0,
FUNCTION_BLOCKED_ON_STACK = 1

7
deps/v8/src/macros.py

@ -120,10 +120,6 @@ macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
# Accessors for original global properties that ensure they have been loaded.
const ORIGINAL_REGEXP = (global.RegExp, $RegExp);
const ORIGINAL_DATE = (global.Date, $Date);
# Constants used on an array to implement the properties of the RegExp object.
const REGEXP_NUMBER_OF_CAPTURES = 0;
const REGEXP_FIRST_CAPTURE = 3;
@ -132,6 +128,9 @@ const REGEXP_FIRST_CAPTURE = 3;
# REGEXP_NUMBER_OF_CAPTURES
macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
# Limit according to ECMA 262 15.9.1.1
const MAX_TIME_MS = 8640000000000000;
# Gets the value of a Date object. If arg is not a Date object
# a type error is thrown.
macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
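The MAX_TIME_MS constant added above is exactly the ECMA 262 15.9.1.1 bound: time values may range over +/-100,000,000 days around the epoch, at 86,400,000 ms per day. A standalone check of the arithmetic:

  #include <cstdint>

  constexpr int64_t kMsPerDay = 86400000;   // ms per day
  constexpr int64_t kMaxDays = 100000000;   // days each side of the epoch
  static_assert(kMsPerDay * kMaxDays == 8640000000000000LL,
                "MAX_TIME_MS is the ECMA 262 15.9.1.1 limit");

  int main() { return 0; }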

18
deps/v8/src/messages.js

@ -27,6 +27,16 @@
// -------------------------------------------------------------------
//
// Matches Script::Type from objects.h
var TYPE_NATIVE = 0;
var TYPE_EXTENSION = 1;
var TYPE_NORMAL = 2;
// Matches Script::CompilationType from objects.h
var COMPILATION_TYPE_HOST = 0;
var COMPILATION_TYPE_EVAL = 1;
var COMPILATION_TYPE_JSON = 2;
// Lazily initialized.
var kVowelSounds = 0;
@ -634,7 +644,7 @@ CallSite.prototype.isToplevel = function () {
CallSite.prototype.isEval = function () {
var script = %FunctionGetScript(this.fun);
-return script && script.compilation_type == 1;
+return script && script.compilation_type == COMPILATION_TYPE_EVAL;
};
CallSite.prototype.getEvalOrigin = function () {
@ -656,7 +666,7 @@ CallSite.prototype.getFunctionName = function () {
}
// Maybe this is an evaluation?
var script = %FunctionGetScript(this.fun);
-if (script && script.compilation_type == 1)
+if (script && script.compilation_type == COMPILATION_TYPE_EVAL)
return "eval";
return null;
};
@ -712,7 +722,7 @@ CallSite.prototype.getColumnNumber = function () {
CallSite.prototype.isNative = function () {
var script = %FunctionGetScript(this.fun);
-return script ? (script.type == 0) : false;
+return script ? (script.type == TYPE_NATIVE) : false;
};
CallSite.prototype.getPosition = function () {
@ -736,7 +746,7 @@ function FormatEvalOrigin(script) {
var eval_from_script = script.eval_from_script;
if (eval_from_script) {
-if (eval_from_script.compilation_type == 1) {
+if (eval_from_script.compilation_type == COMPILATION_TYPE_EVAL) {
// eval script originated from another eval.
eval_origin += " (eval at " + FormatEvalOrigin(eval_from_script) + ")";
} else {

95
deps/v8/src/mips/builtins-mips.cc

@ -74,7 +74,99 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
-UNIMPLEMENTED_MIPS();
+// Called from JSEntryStub::GenerateBody
// Registers:
// a0: entry_address
// a1: function
// a2: receiver_pointer
// a3: argc
// s0: argv
//
// Stack:
// arguments slots
// handler frame
// entry frame
// callee saved registers + ra
// 4 args slots
// args
// Clear the context before we push it when entering the JS frame.
__ li(cp, Operand(0));
// Enter an internal frame.
__ EnterInternalFrame();
// Set up the context from the function argument.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Set up the roots register.
ExternalReference roots_address = ExternalReference::roots_address();
__ li(s6, Operand(roots_address));
// Push the function and the receiver onto the stack.
__ MultiPushReversed(a1.bit() | a2.bit());
// Copy arguments to the stack in a loop.
// a3: argc
// s0: argv, i.e. points to the first arg
Label loop, entry;
__ sll(t0, a3, kPointerSizeLog2);
__ add(t2, s0, t0);
__ b(&entry);
__ nop(); // Branch delay slot nop.
// t2 points past last arg.
__ bind(&loop);
__ lw(t0, MemOperand(s0)); // Read next parameter.
__ addiu(s0, s0, kPointerSize);
__ lw(t0, MemOperand(t0)); // Dereference handle.
__ Push(t0); // Push parameter.
__ bind(&entry);
__ Branch(ne, &loop, s0, Operand(t2));
// Registers:
// a0: entry_address
// a1: function
// a2: receiver_pointer
// a3: argc
// s0: argv
// s6: roots_address
//
// Stack:
// arguments
// receiver
// function
// arguments slots
// handler frame
// entry frame
// callee saved registers + ra
// 4 args slots
// args
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
__ LoadRoot(t4, Heap::kUndefinedValueRootIndex);
__ mov(s1, t4);
__ mov(s2, t4);
__ mov(s3, t4);
__ mov(s4, t4);
__ mov(s5, t4);
// s6 holds the root address. Do not clobber.
// s7 is cp. Do not init.
// Invoke the code and pass argc as a0.
__ mov(a0, a3);
if (is_construct) {
UNIMPLEMENTED_MIPS();
__ break_(0x164);
} else {
ParameterCount actual(a0);
__ InvokeFunction(a1, actual, CALL_FUNCTION);
}
__ LeaveInternalFrame();
__ Jump(ra);
}
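The copy loop in the trampoline above walks argc pointer-sized entries starting at argv (s0); each entry is a handle, i.e. a pointer to the actual value, hence the second lw to dereference it before the push. A standalone sketch (not V8 code) of that double indirection:

  #include <cstdint>
  #include <vector>

  void CopyArguments(int argc, intptr_t** argv, std::vector<intptr_t>* stack) {
    intptr_t** end = argv + argc;        // t2: points past the last arg
    for (intptr_t** p = argv; p != end; ++p) {
      intptr_t* handle = *p;             // read next parameter
      stack->push_back(*handle);         // dereference handle, then push
    }
  }

  int main() {
    intptr_t a = 1, b = 2;
    intptr_t* argv[] = { &a, &b };
    std::vector<intptr_t> stack;
    CopyArguments(2, argv, &stack);
    return (stack.size() == 2 && stack[0] == 1 && stack[1] == 2) ? 0 : 1;
  }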
@ -100,6 +192,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
__ break_(0x201);
}

26
deps/v8/src/mips/codegen-mips-inl.h

@ -36,7 +36,31 @@ namespace internal {
// Platform-specific inline functions.
-void DeferredCode::Jump() { __ b(&entry_label_); }
+void DeferredCode::Jump() {
__ b(&entry_label_);
__ nop();
}
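The nop() added after the branch here is the pattern repeated across the MIPS files in this commit: on MIPS the instruction following a branch occupies the branch delay slot and executes unconditionally, so an emitter with nothing useful to schedule there must pad with a nop. An illustrative stand-in, not the V8 Assembler class:

  #include <cstdio>

  class Assembler {
   public:
    void b(const char* label) { std::printf("  b     %s\n", label); }
    void nop() { std::printf("  nop\n"); }
    // Emit a branch with a safely padded delay slot.
    void BranchPadded(const char* label) {
      b(label);
      nop();  // delay slot: executed regardless of the branch
    }
  };

  int main() {
    Assembler masm;
    masm.BranchPadded("entry_label");
    return 0;
  }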
void Reference::GetValueAndSpill() {
GetValue();
}
void CodeGenerator::VisitAndSpill(Statement* statement) {
Visit(statement);
}
void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
VisitStatements(statements);
}
void CodeGenerator::LoadAndSpill(Expression* expression) {
Load(expression);
}
#undef __

953
deps/v8/src/mips/codegen-mips.cc

File diff suppressed because it is too large

119
deps/v8/src/mips/codegen-mips.h

@ -42,7 +42,77 @@ enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-// -------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
// Reference support
// A reference is a C++ stack-allocated object that keeps an ECMA
// reference on the execution stack while in scope. For variables
// the reference is empty, indicating that it isn't necessary to
// store state on the stack for keeping track of references to those.
// For properties, we keep either one (named) or two (indexed) values
// on the execution stack to represent the reference.
class Reference BASE_EMBEDDED {
public:
// The values of the types are important; see size().
enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
Reference(CodeGenerator* cgen,
Expression* expression,
bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
void set_unloaded() {
ASSERT_NE(ILLEGAL, type_);
ASSERT_NE(UNLOADED, type_);
type_ = UNLOADED;
}
// The size the reference takes up on the stack.
int size() const {
return (type_ < SLOT) ? 0 : type_;
}
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is consumed by the call unless the
// reference is for a compound assignment.
// If the reference is not consumed, it is left in place under its value.
void GetValue();
// Generate code to pop a reference, push the value of the reference,
// and then spill the stack frame.
inline void GetValueAndSpill();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
// on the expression stack. The value is stored in the location specified
// by the reference, and is left on top of the stack, after the reference
// is popped from beneath it (unloaded).
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
// Keep the reference on the stack after get, so it can be used by set later.
bool persist_after_get_;
};
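The "values of the types" comment refers to a small encoding trick: the enum values double as the number of words the reference occupies on the expression stack, so size() reduces to one comparison. A standalone sketch (not the V8 class):

  #include <cassert>

  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };

  int SizeOnStack(Type type) {
    // UNLOADED, ILLEGAL, and SLOT take no space; a named property keeps
    // 1 value on the expression stack, a keyed property keeps 2.
    return (type < SLOT) ? 0 : static_cast<int>(type);
  }

  int main() {
    assert(SizeOnStack(UNLOADED) == 0);
    assert(SizeOnStack(NAMED) == 1);
    assert(SizeOnStack(KEYED) == 2);
    return 0;
  }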
// -----------------------------------------------------------------------------
// Code generation state
// The state is passed down the AST by the code generator (and back up, in
@ -89,7 +159,7 @@ class CodeGenState BASE_EMBEDDED {
-// -------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
// CodeGenerator
class CodeGenerator: public AstVisitor {
@ -152,7 +222,7 @@ class CodeGenerator: public AstVisitor {
// Number of instructions used for the JS return sequence. The constant is
// used by the debugger to patch the JS return sequence.
-static const int kJSReturnSequenceLength = 6;
+static const int kJSReturnSequenceLength = 7;
// If the name is an inline runtime function call return the number of
// expected arguments. Otherwise return -1.
@ -186,9 +256,51 @@ class CodeGenerator: public AstVisitor {
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Visit a statement and then spill the virtual frame if control flow can
// reach the end of the statement (i.e., it does not exit via break,
// continue, return, or throw). This function is used temporarily while
// the code generator is being transformed.
inline void VisitAndSpill(Statement* statement);
// Visit a list of statements and then spill the virtual frame if control
// flow can reach the end of the list.
inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
void Generate(CompilationInfo* info);
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
MemOperand ContextOperand(Register context, int index) const {
return MemOperand(context, Context::SlotOffset(index));
}
MemOperand SlotOperand(Slot* slot, Register tmp);
// Expressions
MemOperand GlobalObject() const {
return ContextOperand(cp, Context::GLOBAL_INDEX);
}
void LoadCondition(Expression* x,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc);
void Load(Expression* x);
void LoadGlobal();
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
inline void LoadAndSpill(Expression* expression);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
@ -290,7 +402,6 @@ class CodeGenerator: public AstVisitor {
CompilationInfo* info_;
// Code generation state
Scope* scope_;
VirtualFrame* frame_;
RegisterAllocator* allocator_;
Condition cc_reg_;

3
deps/v8/src/mips/frames-mips.cc

@ -91,8 +91,7 @@ Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
Address InternalFrame::GetCallerStackPointer() const {
-UNIMPLEMENTED_MIPS();
-return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
+return fp() + StandardFrameConstants::kCallerSPOffset;
}

2
deps/v8/src/mips/frames-mips.h

@ -104,7 +104,7 @@ class ExitFrameConstants : public AllStatic {
static const int kCallerPCOffset = +1 * kPointerSize;
// FP-relative displacement of the caller's SP.
-static const int kCallerSPDisplacement = +4 * kPointerSize;
+static const int kCallerSPDisplacement = +3 * kPointerSize;
};

41
deps/v8/src/mips/ic-mips.cc

@ -74,6 +74,47 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
UNIMPLEMENTED_MIPS();
// Registers:
// a2: name
// ra: return address
// Get the receiver of the function from the stack.
__ lw(a3, MemOperand(sp, argc*kPointerSize));
__ EnterInternalFrame();
// Push the receiver and the name of the function.
__ MultiPush(a2.bit() | a3.bit());
// Call the entry.
__ li(a0, Operand(2));
__ li(a1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
CEntryStub stub(1);
__ CallStub(&stub);
// Move the result to a1 and leave the internal frame.
__ mov(a1, v0);
__ LeaveInternalFrame();
// Check if the receiver is a global object of some sort.
Label invoke, global;
__ lw(a2, MemOperand(sp, argc * kPointerSize));
__ andi(t0, a2, kSmiTagMask);
__ Branch(eq, &invoke, t0, Operand(zero_reg));
__ GetObjectType(a2, a3, a3);
__ Branch(eq, &global, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
__ Branch(ne, &invoke, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
// Patch the receiver on the stack.
__ bind(&global);
__ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
__ sw(a2, MemOperand(sp, argc * kPointerSize));
// Invoke the function.
ParameterCount actual(argc);
__ bind(&invoke);
__ InvokeFunction(a1, actual, JUMP_FUNCTION);
}
// Defined in ic.cc.

92
deps/v8/src/mips/jump-target-mips.cc

@ -42,7 +42,37 @@ namespace internal {
#define __ ACCESS_MASM(cgen()->masm())
void JumpTarget::DoJump() {
-UNIMPLEMENTED_MIPS();
+ASSERT(cgen()->has_valid_frame());
// Live non-frame registers are not allowed at unconditional jumps
// because we have no way of invalidating the corresponding results
// which are still live in the C++ code.
ASSERT(cgen()->HasValidEntryRegisters());
if (is_bound()) {
// Backward jump. There is already a frame expectation at the target.
ASSERT(direction_ == BIDIRECTIONAL);
cgen()->frame()->MergeTo(entry_frame_);
cgen()->DeleteFrame();
} else {
// Use the current frame as the expected one at the target if necessary.
if (entry_frame_ == NULL) {
entry_frame_ = cgen()->frame();
RegisterFile empty;
cgen()->SetFrame(NULL, &empty);
} else {
cgen()->frame()->MergeTo(entry_frame_);
cgen()->DeleteFrame();
}
// The predicate is_linked() should be made true. Its implementation
// detects the presence of a frame pointer in the reaching_frames_ list.
if (!is_linked()) {
reaching_frames_.Add(NULL);
ASSERT(is_linked());
}
}
__ b(&entry_label_);
__ nop(); // Branch delay slot nop.
}
@ -57,12 +87,47 @@ void JumpTarget::Call() {
void JumpTarget::DoBind() {
-UNIMPLEMENTED_MIPS();
+ASSERT(!is_bound());
// Live non-frame registers are not allowed at the start of a basic
// block.
ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
if (cgen()->has_valid_frame()) {
// If there is a current frame we can use it on the fall through.
if (entry_frame_ == NULL) {
entry_frame_ = new VirtualFrame(cgen()->frame());
} else {
ASSERT(cgen()->frame()->Equals(entry_frame_));
}
} else {
// If there is no current frame we must have an entry frame which we can
// copy.
ASSERT(entry_frame_ != NULL);
RegisterFile empty;
cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
}
// The predicate is_linked() should be made false. Its implementation
// detects the presence (or absence) of frame pointers in the
// reaching_frames_ list. If we inserted a bogus frame to make
// is_linked() true, remove it now.
if (is_linked()) {
reaching_frames_.Clear();
}
__ bind(&entry_label_);
}
void BreakTarget::Jump() {
-UNIMPLEMENTED_MIPS();
+// On MIPS we do not currently emit merge code for jumps, so we need to do
// it explicitly here. The only merging necessary is to drop extra
// statement state from the stack.
ASSERT(cgen()->has_valid_frame());
int count = cgen()->frame()->height() - expected_height_;
cgen()->frame()->Drop(count);
DoJump();
}
@ -72,7 +137,26 @@ void BreakTarget::Jump(Result* arg) {
void BreakTarget::Bind() {
-UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
// All the forward-reaching frames should have been adjusted at the
// jumps to this target.
for (int i = 0; i < reaching_frames_.length(); i++) {
ASSERT(reaching_frames_[i] == NULL ||
reaching_frames_[i]->height() == expected_height_);
}
#endif
// Drop leftover statement state from the frame before merging, even
// on the fall through. This is so we can bind the return target
// with state on the frame.
if (cgen()->has_valid_frame()) {
int count = cgen()->frame()->height() - expected_height_;
// On MIPS we do not currently emit merge code at binding sites, so we need
// to do it explicitly here. The only merging necessary is to drop extra
// statement state from the stack.
cgen()->frame()->Drop(count);
}
DoBind();
}

465
deps/v8/src/mips/macro-assembler-mips.cc

@ -55,7 +55,7 @@ void MacroAssembler::Jump(Register target, Condition cond,
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, Register r1, const Operand& r2) {
-Jump(Operand(target), cond, r1, r2);
+Jump(Operand(target, rmode), cond, r1, r2);
}
@ -81,7 +81,7 @@ void MacroAssembler::Call(Register target,
void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
Condition cond, Register r1, const Operand& r2) {
-Call(Operand(target), cond, r1, r2);
+Call(Operand(target, rmode), cond, r1, r2);
}
@ -106,7 +106,7 @@ void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) {
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
-lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
void MacroAssembler::LoadRoot(Register destination,
@ -114,8 +114,7 @@ void MacroAssembler::LoadRoot(Register destination,
Condition cond,
Register src1, const Operand& src2) {
Branch(NegateCondition(cond), 2, src1, src2);
-nop();
-lw(destination, MemOperand(s4, index << kPointerSizeLog2));
+lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
@ -320,7 +319,6 @@ void MacroAssembler::movn(Register rd, Register rt) {
}
// load wartd in a register
void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
ASSERT(!j.is_reg());
@ -372,7 +370,7 @@ void MacroAssembler::MultiPush(RegList regs) {
int16_t NumToPush = NumberOfBitsSet(regs);
addiu(sp, sp, -4 * NumToPush);
-for (int16_t i = 0; i < kNumRegisters; i++) {
+for (int16_t i = kNumRegisters; i > 0; i--) {
if ((regs & (1 << i)) != 0) {
sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
}
@ -385,7 +383,7 @@ void MacroAssembler::MultiPushReversed(RegList regs) {
int16_t NumToPush = NumberOfBitsSet(regs);
addiu(sp, sp, -4 * NumToPush);
-for (int16_t i = kNumRegisters; i > 0; i--) {
+for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
}
@ -396,7 +394,7 @@ void MacroAssembler::MultiPushReversed(RegList regs) {
void MacroAssembler::MultiPop(RegList regs) {
int16_t NumSaved = 0;
-for (int16_t i = kNumRegisters; i > 0; i--) {
+for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
}
@ -408,7 +406,7 @@ void MacroAssembler::MultiPop(RegList regs) {
void MacroAssembler::MultiPopReversed(RegList regs) {
int16_t NumSaved = 0;
-for (int16_t i = 0; i < kNumRegisters; i++) {
+for (int16_t i = kNumRegisters; i > 0; i--) {
if ((regs & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
}
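The four loop-direction swaps above keep the push/pop pairs as exact mirrors: a pop must visit the register mask in the opposite order from the matching push to put every value back where it came from. A standalone round-trip sketch of that invariant (not V8 code):

  #include <cassert>
  #include <cstdint>
  #include <vector>

  using RegList = uint16_t;
  const int kNumRegisters = 16;

  void MultiPush(RegList regs, const int* reg_file, std::vector<int>* stack) {
    for (int i = kNumRegisters - 1; i >= 0; i--) {   // high registers first
      if (regs & (1 << i)) stack->push_back(reg_file[i]);
    }
  }

  void MultiPop(RegList regs, int* reg_file, std::vector<int>* stack) {
    for (int i = 0; i < kNumRegisters; i++) {        // low registers first
      if (regs & (1 << i)) { reg_file[i] = stack->back(); stack->pop_back(); }
    }
  }

  int main() {
    int regs[kNumRegisters], saved[kNumRegisters];
    for (int i = 0; i < kNumRegisters; i++) saved[i] = regs[i] = i * 10;
    std::vector<int> stack;
    RegList mask = (1 << 2) | (1 << 5) | (1 << 9);
    MultiPush(mask, regs, &stack);
    regs[2] = regs[5] = regs[9] = -1;                // clobber
    MultiPop(mask, regs, &stack);
    for (int i = 0; i < kNumRegisters; i++) assert(regs[i] == saved[i]);
    return 0;
  }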
@ -484,6 +482,8 @@ void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
default:
UNREACHABLE();
}
// Emit a nop in the branch delay slot.
nop();
}
@ -550,6 +550,8 @@ void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
default:
UNREACHABLE();
}
// Emit a nop in the branch delay slot.
nop();
}
@ -629,6 +631,8 @@ void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
default:
UNREACHABLE();
}
// Emit a nop in the branch delay slot.
nop();
}
@ -704,6 +708,8 @@ void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
default:
UNREACHABLE();
}
// Emit a nop in the branch delay slot.
nop();
}
@ -714,7 +720,6 @@ void MacroAssembler::Jump(const Operand& target,
jr(target.rm());
} else {
Branch(NegateCondition(cond), 2, rs, rt);
nop();
jr(target.rm());
}
} else { // !target.is_reg()
@ -723,20 +728,20 @@ void MacroAssembler::Jump(const Operand& target,
j(target.imm32_);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
-nop();
-j(target.imm32_); // will generate only one instruction.
+j(target.imm32_); // Will generate only one instruction.
}
} else { // MustUseAt(target)
-li(at, rt);
+li(at, target);
if (cond == cc_always) {
jr(at);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
-nop();
-jr(at); // will generate only one instruction.
+jr(at); // Will generate only one instruction.
}
}
}
// Emit a nop in the branch delay slot.
nop();
}
@ -747,7 +752,6 @@ void MacroAssembler::Call(const Operand& target,
jalr(target.rm());
} else {
Branch(NegateCondition(cond), 2, rs, rt);
nop();
jalr(target.rm());
}
} else { // !target.is_reg()
@ -756,20 +760,20 @@ void MacroAssembler::Call(const Operand& target,
jal(target.imm32_);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
-nop();
-jal(target.imm32_); // will generate only one instruction.
+jal(target.imm32_); // Will generate only one instruction.
}
} else { // MustUseAt(target)
-li(at, rt);
+li(at, target);
if (cond == cc_always) {
jalr(at);
} else {
Branch(NegateCondition(cond), 2, rs, rt);
-nop();
-jalr(at); // will generate only one instruction.
+jalr(at); // Will generate only one instruction.
}
}
}
// Emit a nop in the branch delay slot.
nop();
}
void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
@ -802,7 +806,58 @@ void MacroAssembler::Call(Label* target) {
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
-UNIMPLEMENTED_MIPS();
+// Adjust this code if not the case.
ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// The return address is passed in register ra.
if (try_location == IN_JAVASCRIPT) {
if (type == TRY_CATCH_HANDLER) {
li(t0, Operand(StackHandler::TRY_CATCH));
} else {
li(t0, Operand(StackHandler::TRY_FINALLY));
}
ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
&& StackHandlerConstants::kFPOffset == 2 * kPointerSize
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize
&& StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Save the current handler as the next handler.
LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
lw(t1, MemOperand(t2));
addiu(sp, sp, -StackHandlerConstants::kSize);
sw(ra, MemOperand(sp, 12));
sw(fp, MemOperand(sp, 8));
sw(t0, MemOperand(sp, 4));
sw(t1, MemOperand(sp, 0));
// Link this handler as the new current one.
sw(sp, MemOperand(t2));
} else {
// Must preserve a0-a3, and s0 (argv).
ASSERT(try_location == IN_JS_ENTRY);
ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
&& StackHandlerConstants::kFPOffset == 2 * kPointerSize
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize
&& StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// The frame pointer does not point to a JS frame so we save NULL
// for fp. We expect the code throwing an exception to check fp
// before dereferencing it to restore the context.
li(t0, Operand(StackHandler::ENTRY));
// Save the current handler as the next handler.
LoadExternalReference(t2, ExternalReference(Top::k_handler_address));
lw(t1, MemOperand(t2));
addiu(sp, sp, -StackHandlerConstants::kSize);
sw(ra, MemOperand(sp, 12));
sw(zero_reg, MemOperand(sp, 8));
sw(t0, MemOperand(sp, 4));
sw(t1, MemOperand(sp, 0));
// Link this handler as the new current one.
sw(sp, MemOperand(t2));
}
}
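The handler pushed above is a four-word frame (next, state, fp, ra at offsets 0/4/8/12, matching the ASSERTs) linked onto the chain rooted at Top::k_handler_address. A standalone sketch of the same linked-list push (not the V8 types):

  #include <cstdint>

  struct StackHandler {
    StackHandler* next;  // previous handler (old value at the chain root)
    int32_t state;       // TRY_CATCH, TRY_FINALLY, or ENTRY
    intptr_t fp;         // frame pointer, or 0 for a JS-entry handler
    intptr_t pc;         // return address (ra) to resume at
  };

  void PushHandler(StackHandler** top, StackHandler* h,
                   int32_t state, intptr_t fp, intptr_t pc) {
    h->next = *top;      // save the current handler as the next one
    h->state = state;
    h->fp = fp;
    h->pc = pc;
    *top = h;            // link this handler as the new current one
  }

  int main() {
    StackHandler a, b;
    StackHandler* top = 0;
    PushHandler(&top, &a, 2, 0, 0x1234);
    PushHandler(&top, &b, 0, 0x8000, 0x5678);
    return (top == &b && b.next == &a && a.next == 0) ? 0 : 1;
  }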
@ -812,12 +867,233 @@ void MacroAssembler::PopTryHandler() {
-// ---------------------------------------------------------------------------
+// -----------------------------------------------------------------------------
// Activation frames
void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) {
Label extra_push, end;
andi(scratch, sp, 7);
// We check the size of the args and receiver on the stack, all of them
// word sized. We add one for sp, which we also want to store on the stack.
if (((arg_count + 1) % kPointerSizeLog2) == 0) {
Branch(ne, &extra_push, at, Operand(zero_reg));
} else { // ((arg_count + 1) % 2) == 1
Branch(eq, &extra_push, at, Operand(zero_reg));
}
// Save sp on the stack.
mov(scratch, sp);
Push(scratch);
b(&end);
// Align before saving sp on the stack.
bind(&extra_push);
mov(scratch, sp);
addiu(sp, sp, -8);
sw(scratch, MemOperand(sp));
// The stack is aligned and sp is stored on the top.
bind(&end);
}
void MacroAssembler::ReturnFromAlignedCall() {
lw(sp, MemOperand(sp));
}
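SetupAlignedCall drops either one word or two so that, with the saved sp stored in the slot at the new top, sp ends up 8-byte aligned as the MIPS o32 ABI expects at calls; ReturnFromAlignedCall then restores it with a single load. A standalone sketch of the arithmetic (not V8 code):

  #include <cassert>
  #include <cstdint>

  uintptr_t AlignedSpForCall(uintptr_t sp) {
    // Drop one word if that aligns sp to 8 bytes, otherwise two; the slot
    // at the new sp then holds the old sp, so lw(sp, MemOperand(sp))
    // undoes the whole adjustment in one instruction.
    uintptr_t new_sp = ((sp - 4) % 8 == 0) ? sp - 4 : sp - 8;
    assert(new_sp % 8 == 0);
    return new_sp;
  }

  int main() {
    assert(AlignedSpForCall(0x1004) == 0x1000);  // one word was enough
    assert(AlignedSpForCall(0x1008) == 0x1000);  // needed an extra pad word
    return 0;
  }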
// -----------------------------------------------------------------------------
// JavaScript invokes
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_reg,
Label* done,
InvokeFlag flag) {
bool definitely_matches = false;
Label regular_invoke;
// Check whether the expected and actual arguments count match. If not,
// set up registers according to the contract with ArgumentsAdaptorTrampoline:
// a0: actual arguments count
// a1: function (passed through to callee)
// a2: expected arguments count
// a3: callee code entry
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
// passed in registers.
ASSERT(actual.is_immediate() || actual.reg().is(a0));
ASSERT(expected.is_immediate() || expected.reg().is(a2));
ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
if (expected.is_immediate()) {
ASSERT(actual.is_immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
li(a0, Operand(actual.immediate()));
const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
if (expected.immediate() == sentinel) {
// Don't worry about adapting arguments for builtins that
// don't want that done. Skip adaptation code by making it look
// like we have a match between expected and actual number of
// arguments.
definitely_matches = true;
} else {
li(a2, Operand(expected.immediate()));
}
}
} else if (actual.is_immediate()) {
Branch(eq, &regular_invoke, expected.reg(), Operand(actual.immediate()));
li(a0, Operand(actual.immediate()));
} else {
Branch(eq, &regular_invoke, expected.reg(), Operand(actual.reg()));
}
if (!definitely_matches) {
if (!code_constant.is_null()) {
li(a3, Operand(code_constant));
addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
}
ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline);
if (flag == CALL_FUNCTION) {
CallBuiltin(adaptor);
b(done);
nop();
} else {
JumpToBuiltin(adaptor);
}
bind(&regular_invoke);
}
}
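To make the control flow above easier to follow, here is a schematic C++ sketch of the three cases InvokePrologue distinguishes (not V8 API; the sentinel value is assumed for illustration, standing in for SharedFunctionInfo::kDontAdaptArgumentsSentinel):

#include <cassert>

// Invoke the callee directly, or route through the adaptor trampoline.
enum Path { kDirectCall, kArgumentsAdaptor };

const int kDontAdaptArgumentsSentinel = -1;  // assumed value, for the sketch

Path ChoosePath(int expected, int actual) {
  if (expected == actual) return kDirectCall;
  // Builtins flagged with the sentinel opt out of argument adaptation.
  if (expected == kDontAdaptArgumentsSentinel) return kDirectCall;
  // Otherwise a0 = actual and a2 = expected are set up for the adaptor.
  return kArgumentsAdaptor;
}

int main() {
  assert(ChoosePath(2, 2) == kDirectCall);
  assert(ChoosePath(kDontAdaptArgumentsSentinel, 7) == kDirectCall);
  assert(ChoosePath(2, 3) == kArgumentsAdaptor);
}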
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag) {
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
if (flag == CALL_FUNCTION) {
Call(code);
} else {
ASSERT(flag == JUMP_FUNCTION);
Jump(code);
}
// Continue here if InvokePrologue handled the invocation itself due to
// mismatched parameter counts.
bind(&done);
}
void MacroAssembler::InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag) {
Label done;
InvokePrologue(expected, actual, code, no_reg, &done, flag);
if (flag == CALL_FUNCTION) {
Call(code, rmode);
} else {
Jump(code, rmode);
}
// Continue here if InvokePrologue handled the invocation itself due to
// mismatched parameter counts.
bind(&done);
}
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag) {
// Contract with called JS functions requires that function is passed in a1.
ASSERT(function.is(a1));
Register expected_reg = a2;
Register code_reg = a3;
lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
lw(expected_reg,
FieldMemOperand(code_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
lw(code_reg,
MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag);
ParameterCount expected(expected_reg);
InvokeCode(code_reg, expected, actual, flag);
}
// ---------------------------------------------------------------------------
// Support functions.
void MacroAssembler::GetObjectType(Register function,
Register map,
Register type_reg) {
lw(map, FieldMemOperand(function, HeapObject::kMapOffset));
lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}
void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) {
// Load builtin address.
LoadExternalReference(t9, builtin_entry);
lw(t9, MemOperand(t9)); // Deref address.
addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
// Call and allocate argument slots.
jalr(t9);
// Use the branch delay slot to allocate the argument slots.
addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
}
void MacroAssembler::CallBuiltin(Register target) {
// Target already holds target address.
// Call and allocate argument slots.
jalr(target);
// Use the branch delay slot to allocate the argument slots.
addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
}
void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) {
// Load builtin address.
LoadExternalReference(t9, builtin_entry);
lw(t9, MemOperand(t9)); // Deref address.
addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
// Jump and allocate argument slots.
jr(t9);
// Use the branch delay slot to allocate the argument slots.
addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
}
void MacroAssembler::JumpToBuiltin(Register target) {
// t9 already holds target address.
// Jump and allocate argument slots.
jr(t9);
// Use the branch delay slot to allocate the argument slots.
addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
}
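The four helpers above all lean on the MIPS branch delay slot: the instruction textually after jalr/jr executes before control reaches the target, so the addiu that reserves the argument slots is guaranteed to run first. A toy C++ model of that ordering (simplified; register and size names assumed):

#include <cassert>

// Toy model of a MIPS jalr with its branch delay slot (illustration only).
struct Cpu { unsigned pc, sp, ra; };

const unsigned kRArgsSlotsSize = 4 * 4;  // assumed: four word-sized slots

void JalrWithDelaySlot(Cpu& cpu, unsigned target) {
  cpu.ra = cpu.pc + 8;        // return address: past the branch + delay slot
  cpu.sp -= kRArgsSlotsSize;  // the delay slot runs before the callee
  cpu.pc = target;            // only now does control transfer
}

int main() {
  Cpu cpu = {0x1000, 0x7fff0000, 0};
  JalrWithDelaySlot(cpu, 0x2000);
  assert(cpu.sp == 0x7fff0000 - kRArgsSlotsSize);  // callee sees the slots
  assert(cpu.pc == 0x2000 && cpu.ra == 0x1008);
}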
// -----------------------------------------------------------------------------
// Runtime calls
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
}
@@ -826,13 +1102,38 @@ void MacroAssembler::StubReturn(int argc) {
}
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
addiu(sp, sp, num_arguments * kPointerSize);
}
LoadRoot(v0, Heap::kUndefinedValueRootIndex);
}
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// All parameters are on the stack. v0 has the return value after call.
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments matches the
// expectation.
if (f->nargs >= 0 && f->nargs != num_arguments) {
IllegalOperation(num_arguments);
return;
}
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
li(a0, num_arguments);
LoadExternalReference(a1, ExternalReference(f));
CEntryStub stub(1);
CallStub(&stub);
}
void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
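The arity check at the top of CallRuntime can be restated in a few lines of host C++ (a sketch with assumed names and struct shape, not the V8 declaration): runtime functions with a fixed nargs must be called with exactly that many arguments, while a negative nargs means the function is variadic.

#include <cassert>

// Sketch of the CallRuntime arity check.
struct RuntimeFunction { const char* name; int nargs; };

bool ArityOk(const RuntimeFunction& f, int num_arguments) {
  // Variadic runtime functions record a negative nargs.
  return f.nargs < 0 || f.nargs == num_arguments;
}

int main() {
  RuntimeFunction fixed = {"LazyCompile", 1};
  RuntimeFunction variadic = {"Apply", -1};
  assert(ArityOk(fixed, 1) && !ArityOk(fixed, 2));
  assert(ArityOk(variadic, 0) && ArityOk(variadic, 5));
}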
@@ -891,6 +1192,8 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
// -----------------------------------------------------------------------------
// Debugging
void MacroAssembler::Assert(Condition cc, const char* msg,
Register rs, Operand rt) {
@@ -908,5 +1211,113 @@ void MacroAssembler::Abort(const char* msg) {
UNIMPLEMENTED_MIPS();
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
addiu(sp, sp, -5 * kPointerSize);
li(t0, Operand(Smi::FromInt(type)));
li(t1, Operand(CodeObject()));
sw(ra, MemOperand(sp, 4 * kPointerSize));
sw(fp, MemOperand(sp, 3 * kPointerSize));
sw(cp, MemOperand(sp, 2 * kPointerSize));
sw(t0, MemOperand(sp, 1 * kPointerSize));
sw(t1, MemOperand(sp, 0 * kPointerSize));
addiu(fp, sp, 3 * kPointerSize);
}
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
mov(sp, fp);
lw(fp, MemOperand(sp, 0 * kPointerSize));
lw(ra, MemOperand(sp, 1 * kPointerSize));
addiu(sp, sp, 2 * kPointerSize);
}
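From the five stores in EnterFrame the layout relative to the new fp is fixed; the sketch below restates those offsets as C++ constants and checks them (offset names are illustrative, not V8's):

#include <cassert>

// Frame layout implied by EnterFrame above, relative to the new fp
// (fp = sp + 3 * kPointerSize after the five pushes).
const int kPointerSize = 4;
const int kCodeSlot    = -3 * kPointerSize;  // CodeObject()
const int kMarkerSlot  = -2 * kPointerSize;  // Smi-tagged StackFrame::Type
const int kContextSlot = -1 * kPointerSize;  // cp
const int kCallerFp    =  0;                 // fp points at the saved fp
const int kReturnAddr  =  1 * kPointerSize;  // ra, the highest slot pushed

int main() {
  // Five consecutive words from the code slot up to the return address.
  assert(kReturnAddr - kCodeSlot == 4 * kPointerSize);
  // LeaveFrame reloads fp from MemOperand(sp, 0) and ra from the next word.
  assert(kCallerFp == 0 && kReturnAddr == kPointerSize);
}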
void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode,
Register hold_argc,
Register hold_argv,
Register hold_function) {
// Compute the argv pointer and keep it in a callee-saved register.
// a0 is argc.
sll(t0, a0, kPointerSizeLog2);
add(hold_argv, sp, t0);
addi(hold_argv, hold_argv, -kPointerSize);
// Compute the callee's stack pointer before making changes, and save it
// in t1 so that it can be restored as sp on exit, thereby popping the
// args.
// t1 = sp + kPointerSize * #args
add(t1, sp, t0);
// Align the stack at this point.
AlignStack(0);
// Save registers.
addiu(sp, sp, -12);
sw(t1, MemOperand(sp, 8));
sw(ra, MemOperand(sp, 4));
sw(fp, MemOperand(sp, 0));
mov(fp, sp); // Set up new frame pointer.
// Push debug marker.
if (mode == ExitFrame::MODE_DEBUG) {
Push(zero_reg);
} else {
li(t0, Operand(CodeObject()));
Push(t0);
}
// Save the frame pointer and the context in top.
LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
sw(fp, MemOperand(t0));
LoadExternalReference(t0, ExternalReference(Top::k_context_address));
sw(cp, MemOperand(t0));
// Set up argc and the builtin function in callee-saved registers.
mov(hold_argc, a0);
mov(hold_function, a1);
}
void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
// Clear top frame.
LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address));
sw(zero_reg, MemOperand(t0));
// Restore current context from top and clear it in debug mode.
LoadExternalReference(t0, ExternalReference(Top::k_context_address));
lw(cp, MemOperand(t0));
#ifdef DEBUG
sw(a3, MemOperand(t0));
#endif
// Pop the arguments, restore registers, and return.
mov(sp, fp); // Respect ABI stack constraint.
lw(fp, MemOperand(sp, 0));
lw(ra, MemOperand(sp, 4));
lw(sp, MemOperand(sp, 8));
jr(ra);
nop(); // Branch delay slot nop.
}
void MacroAssembler::AlignStack(int offset) {
// On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
// and an offset of 1 aligns to 4 modulo 8 bytes.
int activation_frame_alignment = OS::ActivationFrameAlignment();
if (activation_frame_alignment != kPointerSize) {
// This code needs to be made more general if this assert doesn't hold.
ASSERT(activation_frame_alignment == 2 * kPointerSize);
if (offset == 0) {
andi(t0, sp, activation_frame_alignment - 1);
Push(zero_reg, eq, t0, zero_reg);
} else {
andi(t0, sp, activation_frame_alignment - 1);
addiu(t0, t0, -4);
Push(zero_reg, eq, t0, zero_reg);
}
}
}
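A compact restatement of the AlignStack contract (host-side sketch, assuming sp stays word aligned so sp mod 8 is either 0 or 4, and at most one pad word is pushed):

#include <cassert>

// Model of AlignStack(offset): offset 0 aligns sp to 0 mod 8,
// offset 1 aligns sp to 4 mod 8.
unsigned AlignedSp(unsigned sp, int offset) {
  assert((sp & 3) == 0);                       // sp is word aligned
  unsigned target = (offset == 0) ? 0u : 4u;   // desired sp mod 8
  return ((sp & 7u) == target) ? sp : sp - 4;  // one pad push at most
}

int main() {
  assert(AlignedSp(0x1000, 0) == 0x1000);  // already 0 mod 8
  assert(AlignedSp(0x1004, 0) == 0x1000);  // pad once
  assert(AlignedSp(0x1004, 1) == 0x1004);  // already 4 mod 8
  assert(AlignedSp(0x1000, 1) == 0x0ffc);  // pad once
}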
} } // namespace v8::internal

95
deps/v8/src/mips/macro-assembler-mips.h

@@ -41,6 +41,7 @@ class JumpTarget;
// unless we know exactly what we do.
// Register aliases
// cp is assumed to be a callee-saved register.
const Register cp = s7; // JavaScript context pointer
const Register fp = s8_fp; // Alias fp
@@ -102,10 +103,10 @@ class MacroAssembler: public Assembler {
// Jump unconditionally to given label.
// We NEED a nop in the branch delay slot, as it is used by v8, for example
// in CodeGenerator::ProcessDeferred().
// Currently the branch delay slot is filled by the MacroAssembler.
// Use b(Label) instead for code generation.
void jmp(Label* L) {
Branch(cc_always, L);
}
// Load an object from the root table.
@@ -115,6 +116,11 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
// Load an external reference.
void LoadExternalReference(Register reg, ExternalReference ext) {
li(reg, Operand(ext));
}
// Sets the remembered set bit for [address+offset].
void RecordWrite(Register object, Register offset, Register scratch);
@@ -191,7 +197,6 @@ class MacroAssembler: public Assembler {
void Push(Register src, Condition cond, Register tst1, Register tst2) {
// Since we don't have conditional execution we use a Branch.
Branch(cond, 3, tst1, Operand(tst2));
Addu(sp, sp, Operand(-kPointerSize));
sw(src, MemOperand(sp, 0));
}
@@ -209,6 +214,53 @@
}
// ---------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
// Enter a specific kind of exit frame, either normal or debug mode.
// Expects the number of arguments in register a0 and
// the builtin function to call in register a1.
// On output hold_argc, hold_function, and hold_argv are set up.
void EnterExitFrame(ExitFrame::Mode mode,
Register hold_argc,
Register hold_argv,
Register hold_function);
// Leave the current exit frame. Expects the return value in v0.
void LeaveExitFrame(ExitFrame::Mode mode);
// Align the stack by optionally pushing a Smi zero.
void AlignStack(int offset);
void SetupAlignedCall(Register scratch, int arg_count = 0);
void ReturnFromAlignedCall();
// ---------------------------------------------------------------------------
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag);
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
@@ -227,8 +279,7 @@ class MacroAssembler: public Assembler {
// Exception handling
// Push a new try handler and link into try handler chain.
// The return address must be passed in register ra.
void PushTryHandler(CodeLocation try_location, HandlerType type);
// Unlink the stack handler on top of the stack from the try handler chain.
@@ -239,6 +290,10 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Support functions.
void GetObjectType(Register function,
Register map,
Register type_reg);
inline void BranchOnSmi(Register value, Label* smi_label,
Register scratch = at) {
ASSERT_EQ(0, kSmiTag);
@@ -254,6 +309,15 @@ class MacroAssembler: public Assembler {
Branch(ne, not_smi_label, scratch, Operand(zero_reg));
}
void CallBuiltin(ExternalReference builtin_entry);
void CallBuiltin(Register target);
void JumpToBuiltin(ExternalReference builtin_entry);
void JumpToBuiltin(Register target);
// Generates code for reporting that an illegal operation has
// occurred.
void IllegalOperation(int num_arguments);
// ---------------------------------------------------------------------------
// Runtime calls
@@ -342,20 +406,33 @@ class MacroAssembler: public Assembler {
bool allow_stub_calls() { return allow_stub_calls_; }
private:
List<Unresolved> unresolved_;
bool generating_stub_;
bool allow_stub_calls_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_reg,
Label* done,
InvokeFlag flag);
// Get the code for the given builtin. Returns if able to resolve
// the function in the 'resolved' flag.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
// EnterFrame clobbers t0 and t1.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
};

47
deps/v8/src/mips/stub-cache-mips.cc

@@ -160,8 +160,31 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
// Registers:
// a1: function
// ra: return address
// Enter an internal frame.
__ EnterInternalFrame();
// Preserve the function.
__ Push(a1);
// Set up the aligned call.
__ SetupAlignedCall(t0, 1);
// Push the function on the stack as the argument to the runtime function.
__ Push(a1);
// Call the runtime function.
__ CallRuntime(Runtime::kLazyCompile, 1);
__ ReturnFromAlignedCall();
// Calculate the entry point.
__ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
// Restore saved function.
__ Pop(a1);
// Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ Jump(t9);
return GetCodeWithFlags(flags, "LazyCompileStub");
}
@@ -174,6 +197,26 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
}
Object* CallStubCompiler::CompileArrayPushCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* CallStubCompiler::CompileArrayPopCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
UNIMPLEMENTED_MIPS();
return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,

122
deps/v8/src/mips/virtual-frame-mips.cc

@@ -53,7 +53,12 @@ void VirtualFrame::SyncElementByPushing(int index) {
void VirtualFrame::SyncRange(int begin, int end) {
// All elements are in memory on MIPS (ie, synced).
#ifdef DEBUG
for (int i = begin; i <= end; i++) {
ASSERT(elements_[i].is_synced());
}
#endif
}
@@ -63,7 +68,13 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
void VirtualFrame::Enter() {
// TODO(MIPS): Implement DEBUG
// We are about to push four values to the frame.
Adjust(4);
__ MultiPush(ra.bit() | fp.bit() | cp.bit() | a1.bit());
// Adjust FP to point to saved FP.
__ addiu(fp, sp, 2 * kPointerSize);
}
@@ -73,7 +84,17 @@ void VirtualFrame::Exit() {
void VirtualFrame::AllocateStackSlots() {
int count = local_count();
if (count > 0) {
Comment cmnt(masm(), "[ Allocate space for locals");
Adjust(count);
// Initialize stack slots with 'undefined' value.
__ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
__ addiu(sp, sp, -count * kPointerSize);
for (int i = 0; i < count; i++) {
__ sw(t0, MemOperand(sp, (count - i - 1) * kPointerSize));
}
}
}
@@ -128,12 +149,16 @@ void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
}
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
}
@@ -155,16 +180,37 @@ void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
}
void VirtualFrame::RawCallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
int dropped_args) {
switch (code->kind()) {
case Code::CALL_IC:
break;
case Code::FUNCTION:
UNIMPLEMENTED_MIPS();
break;
case Code::KEYED_LOAD_IC:
UNIMPLEMENTED_MIPS();
break;
case Code::LOAD_IC:
UNIMPLEMENTED_MIPS();
break;
case Code::KEYED_STORE_IC:
UNIMPLEMENTED_MIPS();
break;
case Code::STORE_IC:
UNIMPLEMENTED_MIPS();
break;
case Code::BUILTIN:
UNIMPLEMENTED_MIPS();
break;
default:
UNREACHABLE();
break;
}
Forget(dropped_args);
ASSERT(cgen()->HasValidEntryRegisters());
__ Call(code, rmode);
}
@@ -187,7 +233,24 @@ void VirtualFrame::CallCodeObject(Handle<Code> code,
void VirtualFrame::Drop(int count) {
ASSERT(count >= 0);
ASSERT(height() >= count);
int num_virtual_elements = (element_count() - 1) - stack_pointer_;
// Emit code to lower the stack pointer if necessary.
if (num_virtual_elements < count) {
int num_dropped = count - num_virtual_elements;
stack_pointer_ -= num_dropped;
__ addiu(sp, sp, num_dropped * kPointerSize);
}
// Discard elements from the virtual frame and free any registers.
for (int i = 0; i < count; i++) {
FrameElement dropped = elements_.RemoveLast();
if (dropped.is_register()) {
Unuse(dropped.reg());
}
}
}
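Because elements above stack_pointer_ live only in the virtual frame, Drop only emits an sp adjustment for the remainder; a small host-side model of that split (names illustrative, not the V8 class):

#include <cassert>

// Model of VirtualFrame::Drop: elements above stack_pointer_ are virtual,
// so dropping them is free; only the rest pops real stack words.
struct Model {
  int element_count;   // size of the virtual frame
  int stack_pointer;   // index of the highest element on the real stack
};

int WordsPoppedBy(Model m, int count) {
  int virtual_elements = (m.element_count - 1) - m.stack_pointer;
  return count > virtual_elements ? count - virtual_elements : 0;
}

int main() {
  // 10 elements, top two are virtual: Drop(5) pops only 3 real words.
  assert(WordsPoppedBy({10, 7}, 5) == 3);
  // Drop(2) emits no code: both victims are virtual.
  assert(WordsPoppedBy({10, 7}, 2) == 0);
}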
@@ -199,27 +262,50 @@ void VirtualFrame::DropFromVFrameOnly(int count) {
Result VirtualFrame::Pop() {
UNIMPLEMENTED_MIPS();
Result res = Result();
return res; // UNIMPLEMENTED RETURN
}
void VirtualFrame::EmitPop(Register reg) {
ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_--;
elements_.RemoveLast();
__ Pop(reg);
}
void VirtualFrame::EmitMultiPop(RegList regs) {
ASSERT(stack_pointer_ == element_count() - 1);
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
stack_pointer_--;
elements_.RemoveLast();
}
}
__ MultiPop(regs);
}
void VirtualFrame::EmitPush(Register reg) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
stack_pointer_++;
__ Push(reg);
}
void VirtualFrame::EmitMultiPush(RegList regs) {
ASSERT(stack_pointer_ == element_count() - 1);
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
stack_pointer_++;
}
}
__ MultiPush(regs);
}
void VirtualFrame::EmitArgumentSlots(RegList reglist) {
UNIMPLEMENTED_MIPS();
}

89
deps/v8/src/mips/virtual-frame-mips.h

@@ -39,18 +39,18 @@ namespace internal {
// -------------------------------------------------------------------------
// Virtual frames
//
// The virtual frame is an abstraction of the physical stack frame. It // The virtual frame is an abstraction of the physical stack frame. It
// encapsulates the parameters, frame-allocated locals, and the expression // encapsulates the parameters, frame-allocated locals, and the expression
// stack. It supports push/pop operations on the expression stack, as well // stack. It supports push/pop operations on the expression stack, as well
// as random access to the expression stack elements, locals, and // as random access to the expression stack elements, locals, and
// parameters. // parameters.
class VirtualFrame : public ZoneObject {
public:
// A utility class to introduce a scope where the virtual frame is // A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code // expected to remain spilled. The constructor spills the code
// generator's current frame, but no attempt is made to require it // generator's current frame, but no attempt is made to require it
// to stay spilled. It is intended as documentation while the code // to stay spilled. It is intended as documentation while the code
// generator is being transformed. // generator is being transformed.
class SpilledScope BASE_EMBEDDED {
public:
@@ -105,12 +105,12 @@ class VirtualFrame : public ZoneObject {
}
// Add extra in-memory elements to the top of the frame to match an actual // Add extra in-memory elements to the top of the frame to match an actual
// frame (eg, the frame after an exception handler is pushed). No code is // frame (eg, the frame after an exception handler is pushed). No code is
// emitted. // emitted.
void Adjust(int count);
// Forget elements from the top of the frame to match an actual frame (eg, // Forget elements from the top of the frame to match an actual frame (eg,
// the frame after a runtime call). No code is emitted. // the frame after a runtime call). No code is emitted.
void Forget(int count) {
ASSERT(count >= 0);
ASSERT(stack_pointer_ == element_count() - 1);
@@ -121,7 +121,7 @@ class VirtualFrame : public ZoneObject {
}
// Forget count elements from the top of the frame and adjust the stack // Forget count elements from the top of the frame and adjust the stack
// pointer downward. This is used, for example, before merging frames at // pointer downward. This is used, for example, before merging frames at
// break, continue, and return targets. // break, continue, and return targets.
void ForgetElements(int count);
@@ -133,24 +133,24 @@ class VirtualFrame : public ZoneObject {
if (is_used(reg)) SpillElementAt(register_location(reg));
}
// Spill all occurrences of an arbitrary register if possible. Return the // Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register // register spilled or no_reg if it was not possible to free any register
// (ie, they all have frame-external references). // (ie, they all have frame-external references).
Register SpillAnyRegister();
// Prepare this virtual frame for merging to an expected frame by // Prepare this virtual frame for merging to an expected frame by
// performing some state changes that do not require generating // performing some state changes that do not require generating
// code. It is guaranteed that no code will be generated. // code. It is guaranteed that no code will be generated.
void PrepareMergeTo(VirtualFrame* expected);
// Make this virtual frame have a state identical to an expected virtual // Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match // frame. As a side effect, code may be emitted to make this frame match
// the expected one. // the expected one.
void MergeTo(VirtualFrame* expected);
// Detach a frame from its code generator, perhaps temporarily. This // Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal // tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this // registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump. // one to NULL by an unconditional jump.
void DetachFromCodeGenerator() {
RegisterAllocator* cgen_allocator = cgen()->allocator();
@@ -159,7 +159,7 @@ class VirtualFrame : public ZoneObject {
}
}
// (Re)attach a frame to its code generator. This informs the register // (Re)attach a frame to its code generator. This informs the register
// allocator that the frame-internal register references are active again. // allocator that the frame-internal register references are active again.
// Used when a code generator's frame is switched from NULL to this one by // Used when a code generator's frame is switched from NULL to this one by
// binding a label. // binding a label.
@@ -170,17 +170,17 @@ class VirtualFrame : public ZoneObject {
}
}
// Emit code for the physical JS entry and exit frame sequences. After // Emit code for the physical JS entry and exit frame sequences. After
// calling Enter, the virtual frame is ready for use; and after calling // calling Enter, the virtual frame is ready for use; and after calling
// Exit it should not be used. Note that Enter does not allocate space in // Exit it should not be used. Note that Enter does not allocate space in
// the physical frame for storing frame-allocated locals. // the physical frame for storing frame-allocated locals.
void Enter();
void Exit();
// Prepare for returning from the frame by spilling locals and // Prepare for returning from the frame by spilling locals and
// dropping all non-locals elements in the virtual frame. This // dropping all non-locals elements in the virtual frame. This
// avoids generating unnecessary merge code when jumping to the // avoids generating unnecessary merge code when jumping to the
// shared return site. Emits code for spills. // shared return site. Emits code for spills.
void PrepareForReturn();
// Allocate and initialize the frame-allocated locals.
@@ -194,11 +194,11 @@ class VirtualFrame : public ZoneObject {
return MemOperand(sp, index * kPointerSize);
}
// Random-access store to a frame-top relative frame element. The result // Random-access store to a frame-top relative frame element. The result
// becomes owned by the frame and is invalidated. // becomes owned by the frame and is invalidated.
void SetElementAt(int index, Result* value);
// Set a frame element to a constant. The index is frame-top relative. // Set a frame element to a constant. The index is frame-top relative.
void SetElementAt(int index, Handle<Object> value) {
Result temp(value);
SetElementAt(index, &temp);
@@ -221,13 +221,13 @@ class VirtualFrame : public ZoneObject {
}
// Push the value of a local frame slot on top of the frame and invalidate // Push the value of a local frame slot on top of the frame and invalidate
// the local slot. The slot should be written to before trying to read // the local slot. The slot should be written to before trying to read
// from it again. // from it again.
void TakeLocalAt(int index) {
TakeFrameSlotAt(local0_index() + index);
}
// Store the top value on the virtual frame into a local frame slot. The // Store the top value on the virtual frame into a local frame slot. The
// value is left in place on top of the frame. // value is left in place on top of the frame.
void StoreToLocalAt(int index) {
StoreToFrameSlotAt(local0_index() + index);
@@ -267,7 +267,7 @@ class VirtualFrame : public ZoneObject {
}
// Push the value of a parameter frame slot on top of the frame and
// invalidate the parameter slot. The slot should be written to before // invalidate the parameter slot. The slot should be written to before
// trying to read from it again. // trying to read from it again.
void TakeParameterAt(int index) {
TakeFrameSlotAt(param0_index() + index);
@@ -292,12 +292,8 @@ class VirtualFrame : public ZoneObject {
RawCallStub(stub);
}
void CallStub(CodeStub* stub, Result* arg);
void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
// Call runtime given the number of arguments expected on (and // Call runtime given the number of arguments expected on (and
@@ -317,7 +313,7 @@ class VirtualFrame : public ZoneObject {
int arg_count);
// Call into an IC stub given the number of arguments it removes // Call into an IC stub given the number of arguments it removes
// from the stack. Register arguments are passed as results and // from the stack. Register arguments are passed as results and
// consumed by the call. // consumed by the call.
void CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
@@ -333,8 +329,8 @@ class VirtualFrame : public ZoneObject {
int dropped_args,
bool set_auto_args_slots = false);
// Drop a number of elements from the top of the expression stack. May // Drop a number of elements from the top of the expression stack. May
// emit code to affect the physical frame. Does not clobber any registers // emit code to affect the physical frame. Does not clobber any registers
// excepting possibly the stack pointer. // excepting possibly the stack pointer.
void Drop(int count);
// Similar to VirtualFrame::Drop but we don't modify the actual stack.
@@ -348,7 +344,7 @@ class VirtualFrame : public ZoneObject {
// Duplicate the top element of the frame.
void Dup() { PushFrameSlotAt(element_count() - 1); }
// Pop an element from the top of the expression stack. Returns a // Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register. // Result, which may be a constant or a register.
Result Pop();
@@ -356,15 +352,15 @@ class VirtualFrame : public ZoneObject {
// emit a corresponding pop instruction.
void EmitPop(Register reg);
// Same but for multiple registers.
void EmitMultiPop(RegList regs);
void EmitMultiPopReversed(RegList regs);
// Push an element on top of the expression stack and emit a // Push an element on top of the expression stack and emit a
// corresponding push instruction. // corresponding push instruction.
void EmitPush(Register reg);
// Same but for multiple registers.
void EmitMultiPush(RegList regs);
void EmitMultiPushReversed(RegList regs);
// Push an element on the virtual frame. // Push an element on the virtual frame.
inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
@@ -384,7 +380,7 @@ class VirtualFrame : public ZoneObject {
// Nip removes zero or more elements from immediately below the top // Nip removes zero or more elements from immediately below the top
// of the frame, leaving the previous top-of-frame value on top of // of the frame, leaving the previous top-of-frame value on top of
// the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x). // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
inline void Nip(int num_dropped);
// This pushes 4 argument slots on the stack and saves the requested 'a'
// registers.
@@ -392,6 +388,7 @@ class VirtualFrame : public ZoneObject {
void EmitArgumentSlots(RegList reglist);
inline void SetTypeForLocalAt(int index, NumberInfo info);
inline void SetTypeForParamAt(int index, NumberInfo info);
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
@@ -416,23 +413,23 @@ class VirtualFrame : public ZoneObject {
int local_count() { return cgen()->scope()->num_stack_slots(); }
// The index of the element that is at the processor's frame pointer // The index of the element that is at the processor's frame pointer
// (the fp register). The parameters, receiver, function, and context // (the fp register). The parameters, receiver, function, and context
// are below the frame pointer. // are below the frame pointer.
int frame_pointer() { return parameter_count() + 3; }
// The index of the first parameter. The receiver lies below the first // The index of the first parameter. The receiver lies below the first
// parameter. // parameter.
int param0_index() { return 1; }
// The index of the context slot in the frame. It is immediately // The index of the context slot in the frame. It is immediately
// below the frame pointer. // below the frame pointer.
int context_index() { return frame_pointer() - 1; }
// The index of the function slot in the frame. It is below the frame // The index of the function slot in the frame. It is below the frame
// pointer and context slot. // pointer and context slot.
int function_index() { return frame_pointer() - 2; }
// The index of the first local. Between the frame pointer and the // The index of the first local. Between the frame pointer and the
// locals lies the return address. // locals lies the return address.
int local0_index() { return frame_pointer() + 2; }
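As a worked instance of the index helpers above (hypothetical function with two parameters and three locals; a plain C++ restatement, not the class itself):

#include <cassert>

// Frame element indices for parameter_count() == 2, per the helpers above:
// 0: receiver, 1..2: parameters, 3: function, 4: context, 5: saved fp,
// 6: return address, 7..: locals.
const int parameter_count = 2;
int frame_pointer()  { return parameter_count + 3; }
int param0_index()   { return 1; }
int context_index()  { return frame_pointer() - 1; }
int function_index() { return frame_pointer() - 2; }
int local0_index()   { return frame_pointer() + 2; }

int main() {
  assert(frame_pointer() == 5);
  assert(function_index() == 3 && context_index() == 4);
  assert(local0_index() == 7);  // three locals occupy 7, 8, 9
}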
@@ -447,7 +444,7 @@ class VirtualFrame : public ZoneObject {
return (frame_pointer() - index) * kPointerSize;
}
// Record an occurrence of a register in the virtual frame. This has the // Record an occurrence of a register in the virtual frame. This has the
// effect of incrementing the register's external reference count and // effect of incrementing the register's external reference count and
// of updating the index of the register's location in the frame. // of updating the index of the register's location in the frame.
void Use(Register reg, int index) {
@@ -456,7 +453,7 @@ class VirtualFrame : public ZoneObject {
cgen()->allocator()->Use(reg);
}
// Record that a register reference has been dropped from the frame. This // Record that a register reference has been dropped from the frame. This
// decrements the register's external reference count and invalidates the // decrements the register's external reference count and invalidates the
// index of the register's location in the frame. // index of the register's location in the frame.
void Unuse(Register reg) {
@@ -470,7 +467,7 @@ class VirtualFrame : public ZoneObject {
// constant.
void SpillElementAt(int index);
// Sync the element at a particular index. If it is a register or // Sync the element at a particular index. If it is a register or
// constant that disagrees with the value on the stack, write it to memory. // constant that disagrees with the value on the stack, write it to memory.
// Keep the element type as register or constant, and clear the dirty bit. // Keep the element type as register or constant, and clear the dirty bit.
void SyncElementAt(int index);
@@ -497,7 +494,7 @@ class VirtualFrame : public ZoneObject {
void StoreToFrameSlotAt(int index);
// Spill all elements in registers. Spill the top spilled_args elements // Spill all elements in registers. Spill the top spilled_args elements
// on the frame. Sync all other frame elements. // on the frame. Sync all other frame elements.
// Then drop dropped_args elements from the virtual frame, to match // Then drop dropped_args elements from the virtual frame, to match
// the effect of an upcoming call that will drop them from the stack. // the effect of an upcoming call that will drop them from the stack.
void PrepareForCall(int spilled_args, int dropped_args);
@@ -518,14 +515,14 @@ class VirtualFrame : public ZoneObject {
// Make the memory-to-register and constant-to-register moves // Make the memory-to-register and constant-to-register moves
// needed to make this frame equal the expected frame. // needed to make this frame equal the expected frame.
// Called after all register-to-memory and register-to-register // Called after all register-to-memory and register-to-register
// moves have been made. After this function returns, the frames // moves have been made. After this function returns, the frames
// should be equal. // should be equal.
void MergeMoveMemoryToRegisters(VirtualFrame* expected);
// Invalidates a frame slot (puts an invalid frame element in it). // Invalidates a frame slot (puts an invalid frame element in it).
// Copies on the frame are correctly handled, and if this slot was // Copies on the frame are correctly handled, and if this slot was
// the backing store of copies, the index of the new backing store // the backing store of copies, the index of the new backing store
// is returned. Otherwise, returns kIllegalIndex. // is returned. Otherwise, returns kIllegalIndex.
// Register counts are correctly updated. // Register counts are correctly updated.
int InvalidateFrameSlotAt(int index);

Some files were not shown because too many files changed in this diff
