From 1f31a7dbfe792fa6eee8a9cdcdfd662aad5cde06 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Wed, 7 Oct 2009 11:53:03 +0200 Subject: [PATCH] Upgrade v8 to 1.3.14 --- LICENSE | 3 - Makefile | 2 +- deps/v8/ChangeLog | 43 + deps/v8/LICENSE | 7 - deps/v8/SConstruct | 1 + deps/v8/include/v8.h | 79 +- deps/v8/src/SConscript | 61 +- deps/v8/src/api.cc | 84 +- deps/v8/src/api.h | 85 +- deps/v8/src/arguments.h | 26 + deps/v8/src/arm/assembler-arm-inl.h | 8 +- deps/v8/src/arm/assembler-arm.h | 4 +- deps/v8/src/arm/builtins-arm.cc | 413 ++++++++- deps/v8/src/arm/cfg-arm.cc | 301 ------ deps/v8/src/arm/codegen-arm.cc | 134 ++- deps/v8/src/arm/codegen-arm.h | 2 +- deps/v8/src/arm/macro-assembler-arm.cc | 77 +- deps/v8/src/arm/macro-assembler-arm.h | 44 +- deps/v8/src/arm/simulator-arm.cc | 3 +- deps/v8/src/arm/simulator-arm.h | 37 +- deps/v8/src/arm/stub-cache-arm.cc | 12 +- deps/v8/src/array.js | 4 +- deps/v8/src/assembler.h | 1 + deps/v8/src/ast.cc | 1 - deps/v8/src/ast.h | 49 +- deps/v8/src/bootstrapper.cc | 19 +- deps/v8/src/bootstrapper.h | 6 +- deps/v8/src/builtins.cc | 4 +- deps/v8/src/cfg.cc | 763 --------------- deps/v8/src/cfg.h | 871 ------------------ deps/v8/src/codegen.cc | 40 +- deps/v8/src/compiler.cc | 21 - deps/v8/src/debug-agent.cc | 5 + deps/v8/src/debug-agent.h | 5 +- deps/v8/src/debug-delay.js | 24 +- deps/v8/src/debug.cc | 5 + deps/v8/src/debug.h | 4 + deps/v8/src/execution.cc | 114 +-- deps/v8/src/execution.h | 64 +- deps/v8/src/factory.cc | 5 + deps/v8/src/factory.h | 2 + deps/v8/src/flag-definitions.h | 2 - deps/v8/src/handles.cc | 30 +- deps/v8/src/heap-profiler.cc | 210 +++-- deps/v8/src/heap-profiler.h | 21 +- deps/v8/src/heap.cc | 74 +- deps/v8/src/heap.h | 3 + deps/v8/src/ia32/assembler-ia32-inl.h | 12 +- deps/v8/src/ia32/builtins-ia32.cc | 90 +- deps/v8/src/ia32/cfg-ia32.cc | 315 ------- deps/v8/src/ia32/codegen-ia32.cc | 141 ++- deps/v8/src/ia32/codegen-ia32.h | 2 +- deps/v8/src/ia32/ic-ia32.cc | 17 +- deps/v8/src/ia32/macro-assembler-ia32.cc | 46 +- deps/v8/src/ia32/macro-assembler-ia32.h | 64 +- deps/v8/src/ia32/simulator-ia32.h | 19 +- deps/v8/src/ia32/stub-cache-ia32.cc | 12 +- deps/v8/src/list.h | 7 + deps/v8/src/log-utils.cc | 2 +- deps/v8/src/macro-assembler.h | 2 +- deps/v8/src/mark-compact.cc | 10 +- deps/v8/src/memory.h | 4 + deps/v8/src/messages.js | 17 +- deps/v8/src/mirror-delay.js | 2 - deps/v8/src/objects.cc | 157 ++-- deps/v8/src/objects.h | 59 +- deps/v8/src/parser.cc | 183 ++-- deps/v8/src/platform-freebsd.cc | 14 +- deps/v8/src/platform-macos.cc | 16 +- deps/v8/src/prettyprinter.cc | 10 - deps/v8/src/regexp-stack.cc | 8 + deps/v8/src/regexp-stack.h | 2 + deps/v8/src/rewriter.cc | 12 - deps/v8/src/runtime.cc | 40 +- deps/v8/src/serialize.cc | 32 +- deps/v8/src/spaces.cc | 136 ++- deps/v8/src/spaces.h | 74 +- deps/v8/src/string.js | 78 +- deps/v8/src/stub-cache.cc | 41 +- deps/v8/src/top.cc | 3 +- deps/v8/src/top.h | 7 + deps/v8/src/uri.js | 10 +- deps/v8/src/usage-analyzer.cc | 53 +- deps/v8/src/utils.cc | 2 +- deps/v8/src/v8.cc | 16 +- deps/v8/src/v8.h | 2 +- deps/v8/src/v8threads.cc | 34 +- deps/v8/src/v8threads.h | 1 + deps/v8/src/variables.h | 8 +- deps/v8/src/version.cc | 2 +- deps/v8/src/x64/assembler-x64-inl.h | 65 +- deps/v8/src/x64/assembler-x64.cc | 39 +- deps/v8/src/x64/assembler-x64.h | 45 +- deps/v8/src/x64/builtins-x64.cc | 449 ++++++++- deps/v8/src/x64/cfg-x64.cc | 324 ------- deps/v8/src/x64/codegen-x64.cc | 142 ++- deps/v8/src/x64/codegen-x64.h | 2 +- deps/v8/src/x64/ic-x64.cc | 84 +- deps/v8/src/x64/macro-assembler-x64.cc | 109 ++- 
deps/v8/src/x64/macro-assembler-x64.h | 74 +- deps/v8/src/x64/simulator-x64.h | 19 +- deps/v8/src/x64/stub-cache-x64.cc | 13 +- deps/v8/test/cctest/test-alloc.cc | 69 ++ deps/v8/test/cctest/test-api.cc | 128 ++- deps/v8/test/cctest/test-debug.cc | 5 +- deps/v8/test/cctest/test-heap-profiler.cc | 84 +- deps/v8/test/cctest/test-log.cc | 7 - deps/v8/test/cctest/test-sockets.cc | 1 + deps/v8/test/mjsunit/class-of-builtins.js | 2 +- deps/v8/test/mjsunit/debug-compile-event.js | 4 +- deps/v8/test/mjsunit/invalid-lhs.js | 11 +- deps/v8/test/mjsunit/mirror-script.js | 16 +- deps/v8/test/mjsunit/regress/regress-220.js | 2 +- deps/v8/test/mjsunit/switch.js | 4 +- .../test/mjsunit/third_party/object-keys.js | 2 + deps/v8/tools/gyp/v8.gyp | 7 +- deps/v8/tools/js2c.py | 20 +- deps/v8/tools/jsmin.py | 496 +++++----- deps/v8/tools/visual_studio/v8_base.vcproj | 12 - .../v8/tools/visual_studio/v8_base_arm.vcproj | 12 - .../v8/tools/visual_studio/v8_base_x64.vcproj | 12 - src/node.js | 4 +- tools/js2c.py | 77 +- 123 files changed, 3495 insertions(+), 4380 deletions(-) delete mode 100644 deps/v8/src/arm/cfg-arm.cc delete mode 100644 deps/v8/src/cfg.cc delete mode 100644 deps/v8/src/cfg.h delete mode 100644 deps/v8/src/ia32/cfg-ia32.cc delete mode 100644 deps/v8/src/x64/cfg-x64.cc diff --git a/LICENSE b/LICENSE index 203c7c9036..629b010ab7 100644 --- a/LICENSE +++ b/LICENSE @@ -9,9 +9,6 @@ are: This code is copyrighted by Marc Alexander Lehmann. Both are dually licensed under MIT and GPL2. - - JSMin JavaScript minifier, located at tools/jsmin.py. This code is - copyrighted by Douglas Crockford and Baruch Even and has an MIT license. - - parseUri, a URI parser, is located in lib/http.js. This is just a small snippet. It is copyrighted 2007 by Steven Levithan and released under an MIT license. diff --git a/Makefile b/Makefile index f353eae64e..0f6093917a 100644 --- a/Makefile +++ b/Makefile @@ -49,7 +49,7 @@ clean: distclean: @-rm -rf build/ - @-rm -f *.pyc + @-find tools | egrep --colour=never ".pyc$" | xargs rm check: @tools/waf-light check diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 8c7459168d..88c34f9f11 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,46 @@ +2009-10-07: Version 1.3.14 + + Added GetRealNamedProperty to the API to look up real properties + located on the object or in the prototype chain, skipping any + interceptors. + + Fixed the stack limit setting API to work correctly with threads. The + stack limit now needs to be set for each thread which is used with V8. + + Removed the high-priority flag from IdleNotification(). + + Ensured V8 is initialized before locking and unlocking threads. + + Implemented a new JavaScript minifier for compressing the source of + the built-in JavaScript. This removes non-Open Source code by Douglas + Crockford from the project. + + Added a missing optimization in StringCharAt. + + Fixed some flaky socket tests. + + Change by Alexander Botero-Lowry to fix profiler sampling on FreeBSD + in 64-bit mode. + + Fixed memory leaks in the thread management code. + + Fixed the result of assignment to a pixel array. The assigned value + is now the result. + + Error reporting for invalid left-hand sides in for-in statements, pre- + and postfix count expressions, and assignments now matches the JSC + behavior in Safari 4. + + Followed the spec in disallowing function declarations without a name. + + Always allocate code objects within a 2 GB range. On the x64 architecture + this makes it possible to use near calls (32-bit displacement) in Code objects.
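The two thread-related entries above change how an embedder drives V8: the stack limit must now be installed separately on every thread that enters the VM, and IdleNotification() no longer takes a priority flag. A minimal embedder-side sketch against the v8.h in this patch; the function names and the 128 KB reservation are illustrative assumptions, not part of the patch:

    #include <v8.h>

    // Hypothetical per-thread setup: install a stack limit for the calling
    // thread while holding the V8 lock.
    static void InstallStackLimitForThisThread() {
      v8::Locker locker;  // hold the V8 lock while changing constraints
      static const int kStackSizeBytes = 128 * 1024;  // illustrative size
      uint32_t here;  // a local gives an approximate top-of-stack address
      v8::ResourceConstraints constraints;
      constraints.set_stack_limit(&here - kStackSizeBytes / sizeof(uint32_t));
      v8::SetResourceConstraints(&constraints);
    }

    // IdleNotification() now takes no arguments and returns true once V8
    // has done as much cleanup as it can.
    static void OnEmbedderIdle() {
      while (!v8::V8::IdleNotification()) {
        // V8 still has cleanup work; give it another idle round.
      }
    }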
+ + Optimized array construction ported to x64 and ARM architectures. + + [ES5] Changed Object.keys to return strings for element indices. + + 2009-09-23: Version 1.3.13 Fixed uninitialized memory problem. diff --git a/deps/v8/LICENSE b/deps/v8/LICENSE index d2862b4ee8..e3ed242d42 100644 --- a/deps/v8/LICENSE +++ b/deps/v8/LICENSE @@ -21,13 +21,6 @@ are: This code is copyrighted by Sun Microsystems Inc. and released under a 3-clause BSD license. - - JSMin JavaScript minifier, located at tools/jsmin.py. This code is - copyrighted by Douglas Crockford and Baruch Even and released under - an MIT license. - - - Valgrind client API header, located at third_party/valgrind/valgrind.h - This is released under the BSD license. - - Valgrind client API header, located at third_party/valgrind/valgrind.h This is released under the BSD license. diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index e1a37f34f5..b5aa7abadb 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -238,6 +238,7 @@ V8_EXTRA_FLAGS = { 'gcc': { 'all': { 'WARNINGFLAGS': ['-Wall', + '-Werror', '-W', '-Wno-unused-parameter', '-Wnon-virtual-dtor'] diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 1a3177bb0d..adb9f43176 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -1,4 +1,4 @@ -// Copyright 2007-2008 the V8 project authors. All rights reserved. +// Copyright 2007-2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -130,6 +130,7 @@ class Data; namespace internal { class Object; +class Arguments; } @@ -1205,7 +1206,14 @@ class V8EXPORT Object : public Value { * If result.IsEmpty() no real property was located in the prototype chain. * This means interceptors in the prototype chain are not called. */ - Handle<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key); + Local<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key); + + /** + * If result.IsEmpty() no real property was located on the object or + * in the prototype chain. + * This means interceptors in the prototype chain are not called. + */ + Local<Value> GetRealNamedProperty(Handle<String> key); /** Tests for a named lookup interceptor.*/ bool HasNamedLookupInterceptor(); @@ -1401,17 +1409,13 @@ class V8EXPORT Arguments { */ class V8EXPORT AccessorInfo { public: - inline AccessorInfo(Local<Object> self, - Local<Value> data, - Local<Object> holder) - : self_(self), data_(data), holder_(holder) { } + inline AccessorInfo(internal::Object** args) + : args_(args) { } inline Local<Value> Data() const; inline Local<Object> This() const; inline Local<Object> Holder() const; private: - Local<Object> self_; - Local<Value> data_; - Local<Object> holder_; + internal::Object** args_; }; @@ -1567,7 +1571,10 @@ typedef bool (*IndexedSecurityCallback)(Local<Object> host, /** * A FunctionTemplate is used to create functions at runtime. There * can only be one function created from a FunctionTemplate in a - * context. + * context. The lifetime of the created function is equal to the + * lifetime of the context, so if the embedder needs to create + * temporary functions that can be collected, using Scripts is + * preferred. * * A FunctionTemplate can have properties; these properties are added to the * function object when it is created. @@ -1974,8 +1981,13 @@ Handle<Boolean> V8EXPORT False(); /** - * A set of constraints that specifies the limits of the runtime's - * memory use. + * A set of constraints that specifies the limits of the runtime's memory use.
+ * You must set the heap size before initializing the VM - the size cannot be + * adjusted after the VM is initialized. + * + * If you are using threads then you should hold the V8::Locker lock while + * setting the stack limit and you must set a non-default stack limit separately + * for each thread. */ class V8EXPORT ResourceConstraints { public: @@ -1985,6 +1997,7 @@ class V8EXPORT ResourceConstraints { int max_old_space_size() const { return max_old_space_size_; } void set_max_old_space_size(int value) { max_old_space_size_ = value; } uint32_t* stack_limit() const { return stack_limit_; } + // Sets an address beyond which the VM's stack may not grow. void set_stack_limit(uint32_t* value) { stack_limit_ = value; } private: int max_young_space_size_; @@ -2192,7 +2205,8 @@ class V8EXPORT V8 { /** * Initializes from snapshot if possible. Otherwise, attempts to - * initialize from scratch. + * initialize from scratch. This function is called implicitly if + * you use the API without calling it first. */ static bool Initialize(); @@ -2335,12 +2349,11 @@ class V8EXPORT V8 { * Optional notification that the embedder is idle. * V8 uses the notification to reduce memory footprint. * This call can be used repeatedly if the embedder remains idle. - * \param is_high_priority tells whether the embedder is high priority. * Returns true if the embedder should stop calling IdleNotification * until real work has been done. This indicates that V8 has done * as much cleanup as it will be able to do. */ - static bool IdleNotification(bool is_high_priority); + static bool IdleNotification(); /** * Optional notification that the system is running low on memory. @@ -2742,15 +2755,15 @@ class Internals { return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) == kHeapObjectTag); } - + static inline bool HasSmiTag(internal::Object* value) { return ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag); } - + static inline int SmiValue(internal::Object* value) { return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> kSmiTagSize; } - + static inline bool IsExternalTwoByteString(int instance_type) { int representation = (instance_type & kFullStringRepresentationMask); return representation == kExternalTwoByteRepresentationTag; @@ -2863,21 +2876,6 @@ int Arguments::Length() const { } -Local<Value> AccessorInfo::Data() const { - return data_; -} - - -Local<Object> AccessorInfo::This() const { - return self_; -} - - -Local<Object> AccessorInfo::Holder() const { - return holder_; -} - - template <class T> Local<T> HandleScope::Close(Handle<T> value) { internal::Object** before = reinterpret_cast<internal::Object**>(*value); @@ -3075,6 +3073,21 @@ External* External::Cast(v8::Value* value) { } +Local<Value> AccessorInfo::Data() const { + return Local<Value>(reinterpret_cast<Value*>(&args_[-3])); +} + + +Local<Object> AccessorInfo::This() const { + return Local<Object>(reinterpret_cast<Object*>(&args_[0])); +} + + +Local<Object> AccessorInfo::Holder() const { + return Local<Object>(reinterpret_cast<Object*>(&args_[-1])); +} + + /** * \example shell.cc * A simple shell that takes a list of expressions on the diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index 423064782a..b6c2b4d266 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -36,49 +36,48 @@ Import('context') SOURCES = { 'all': [ 'accessors.cc', 'allocation.cc', 'api.cc', 'assembler.cc', 'ast.cc', - 'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'cfg.cc', - 'code-stubs.cc', 'codegen.cc', 'compilation-cache.cc', 'compiler.cc', - 'contexts.cc', 'conversions.cc', 'counters.cc', 'dateparser.cc', - 'debug.cc', 'debug-agent.cc', 'disassembler.cc', 'execution.cc', -
'factory.cc', 'flags.cc', 'frame-element.cc', 'frames.cc', - 'func-name-inferrer.cc', 'global-handles.cc', 'handles.cc', - 'hashmap.cc', 'heap.cc', 'heap-profiler.cc', 'ic.cc', - 'interpreter-irregexp.cc', 'jsregexp.cc', 'jump-target.cc', - 'log.cc', 'log-utils.cc', 'mark-compact.cc', 'messages.cc', - 'objects.cc', 'oprofile-agent.cc', 'parser.cc', 'property.cc', - 'regexp-macro-assembler.cc', 'regexp-macro-assembler-irregexp.cc', - 'regexp-stack.cc', 'register-allocator.cc', 'rewriter.cc', - 'runtime.cc', 'scanner.cc', 'scopeinfo.cc', 'scopes.cc', - 'serialize.cc', 'snapshot-common.cc', 'spaces.cc', - 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc', + 'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc', + 'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc', + 'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc', + 'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc', + 'flags.cc', 'frame-element.cc', 'frames.cc', 'func-name-inferrer.cc', + 'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc', + 'heap-profiler.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc', + 'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc', + 'messages.cc', 'objects.cc', 'oprofile-agent.cc', 'parser.cc', + 'property.cc', 'regexp-macro-assembler.cc', + 'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc', + 'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc', + 'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc', + 'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc', 'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc', 'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc', 'virtual-frame.cc', 'zone.cc' ], 'arch:arm': [ - 'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/cfg-arm.cc', - 'arm/codegen-arm.cc', 'arm/constants-arm.cc', 'arm/cpu-arm.cc', - 'arm/disasm-arm.cc', 'arm/debug-arm.cc', 'arm/frames-arm.cc', - 'arm/ic-arm.cc', 'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc', - 'arm/regexp-macro-assembler-arm.cc', - 'arm/register-allocator-arm.cc', 'arm/stub-cache-arm.cc', - 'arm/virtual-frame-arm.cc' + 'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/codegen-arm.cc', + 'arm/constants-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc', + 'arm/debug-arm.cc', 'arm/frames-arm.cc', 'arm/ic-arm.cc', + 'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc', + 'arm/regexp-macro-assembler-arm.cc', 'arm/register-allocator-arm.cc', + 'arm/stub-cache-arm.cc', 'arm/virtual-frame-arm.cc' ], 'arch:ia32': [ - 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc', + 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc', 'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc', 'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc', - 'ia32/regexp-macro-assembler-ia32.cc', 'ia32/register-allocator-ia32.cc', - 'ia32/stub-cache-ia32.cc', 'ia32/virtual-frame-ia32.cc' + 'ia32/regexp-macro-assembler-ia32.cc', + 'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc', + 'ia32/virtual-frame-ia32.cc' ], 'arch:x64': [ - 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc', - 'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc', - 'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc', - 'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc', - 'x64/regexp-macro-assembler-x64.cc', 'x64/register-allocator-x64.cc', - 'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc' + 
'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/codegen-x64.cc', + 'x64/cpu-x64.cc', 'x64/disasm-x64.cc', 'x64/debug-x64.cc', + 'x64/frames-x64.cc', 'x64/ic-x64.cc', 'x64/jump-target-x64.cc', + 'x64/macro-assembler-x64.cc', 'x64/regexp-macro-assembler-x64.cc', + 'x64/register-allocator-x64.cc', 'x64/stub-cache-x64.cc', + 'x64/virtual-frame-x64.cc' ], 'simulator:arm': ['arm/simulator-arm.cc'], 'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'], diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index eaa4f5a45e..00f1e0b7e1 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -28,6 +28,7 @@ #include "v8.h" #include "api.h" +#include "arguments.h" #include "bootstrapper.h" #include "compiler.h" #include "debug.h" @@ -71,7 +72,7 @@ namespace v8 { thread_local.DecrementCallDepth(); \ if (has_pending_exception) { \ if (thread_local.CallDepthIsZero() && i::Top::is_out_of_memory()) { \ - if (!thread_local.IgnoreOutOfMemory()) \ + if (!thread_local.ignore_out_of_memory()) \ i::V8::FatalProcessOutOfMemory(NULL); \ } \ bool call_depth_is_zero = thread_local.CallDepthIsZero(); \ @@ -341,9 +342,12 @@ ResourceConstraints::ResourceConstraints() bool SetResourceConstraints(ResourceConstraints* constraints) { - bool result = i::Heap::ConfigureHeap(constraints->max_young_space_size(), - constraints->max_old_space_size()); - if (!result) return false; + int semispace_size = constraints->max_young_space_size(); + int old_gen_size = constraints->max_old_space_size(); + if (semispace_size != 0 || old_gen_size != 0) { + bool result = i::Heap::ConfigureHeap(semispace_size, old_gen_size); + if (!result) return false; + } if (constraints->stack_limit() != NULL) { uintptr_t limit = reinterpret_cast(constraints->stack_limit()); i::StackGuard::SetStackLimit(limit); @@ -1898,6 +1902,7 @@ bool v8::Object::Set(v8::Handle key, v8::Handle value, v8::PropertyAttribute attribs) { ON_BAILOUT("v8::Object::Set()", return false); ENTER_V8; + HandleScope scope; i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); i::Handle value_obj = Utils::OpenHandle(*value); @@ -1918,6 +1923,7 @@ bool v8::Object::ForceSet(v8::Handle key, v8::PropertyAttribute attribs) { ON_BAILOUT("v8::Object::ForceSet()", return false); ENTER_V8; + HandleScope scope; i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); i::Handle value_obj = Utils::OpenHandle(*value); @@ -1936,6 +1942,7 @@ bool v8::Object::ForceSet(v8::Handle key, bool v8::Object::ForceDelete(v8::Handle key) { ON_BAILOUT("v8::Object::ForceDelete()", return false); ENTER_V8; + HandleScope scope; i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); EXCEPTION_PREAMBLE(); @@ -2121,7 +2128,7 @@ bool v8::Object::HasIndexedLookupInterceptor() { } -Handle v8::Object::GetRealNamedPropertyInPrototypeChain( +Local v8::Object::GetRealNamedPropertyInPrototypeChain( Handle key) { ON_BAILOUT("v8::Object::GetRealNamedPropertyInPrototypeChain()", return Local()); @@ -2142,12 +2149,32 @@ Handle v8::Object::GetRealNamedPropertyInPrototypeChain( } +Local v8::Object::GetRealNamedProperty(Handle key) { + ON_BAILOUT("v8::Object::GetRealNamedProperty()", return Local()); + ENTER_V8; + i::Handle self_obj = Utils::OpenHandle(this); + i::Handle key_obj = Utils::OpenHandle(*key); + i::LookupResult lookup; + self_obj->LookupRealNamedProperty(*key_obj, &lookup); + if (lookup.IsValid()) { + PropertyAttributes attributes; + i::Handle result(self_obj->GetProperty(*self_obj, + &lookup, + *key_obj, + 
&attributes)); + return Utils::ToLocal(result); + } + return Local(); // No real property was found in prototype chain. +} + + // Turns on access checks by copying the map and setting the check flag. // Because the object gets a new map, existing inline cache caching // the old map of this object will fail. void v8::Object::TurnOnAccessCheck() { ON_BAILOUT("v8::Object::TurnOnAccessCheck()", return); ENTER_V8; + HandleScope scope; i::Handle obj = Utils::OpenHandle(this); i::Handle new_map = @@ -2177,6 +2204,7 @@ Local v8::Object::Clone() { int v8::Object::GetIdentityHash() { ON_BAILOUT("v8::Object::GetIdentityHash()", return 0); ENTER_V8; + HandleScope scope; i::Handle self = Utils::OpenHandle(this); i::Handle hidden_props(i::GetHiddenProperties(self, true)); i::Handle hash_symbol = i::Factory::identity_hash_symbol(); @@ -2206,6 +2234,7 @@ bool v8::Object::SetHiddenValue(v8::Handle key, v8::Handle value) { ON_BAILOUT("v8::Object::SetHiddenValue()", return false); ENTER_V8; + HandleScope scope; i::Handle self = Utils::OpenHandle(this); i::Handle hidden_props(i::GetHiddenProperties(self, true)); i::Handle key_obj = Utils::OpenHandle(*key); @@ -2245,6 +2274,7 @@ v8::Local v8::Object::GetHiddenValue(v8::Handle key) { bool v8::Object::DeleteHiddenValue(v8::Handle key) { ON_BAILOUT("v8::DeleteHiddenValue()", return false); ENTER_V8; + HandleScope scope; i::Handle self = Utils::OpenHandle(this); i::Handle hidden_props(i::GetHiddenProperties(self, false)); if (hidden_props->IsUndefined()) { @@ -2259,6 +2289,7 @@ bool v8::Object::DeleteHiddenValue(v8::Handle key) { void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) { ON_BAILOUT("v8::SetElementsToPixelData()", return); ENTER_V8; + HandleScope scope; if (!ApiCheck(i::Smi::IsValid(length), "v8::Object::SetIndexedPropertiesToPixelData()", "length exceeds max acceptable value")) { @@ -2419,20 +2450,14 @@ int String::Write(uint16_t* buffer, int start, int length) const { ENTER_V8; ASSERT(start >= 0 && length >= -1); i::Handle str = Utils::OpenHandle(this); - // Flatten the string for efficiency. This applies whether we are - // using StringInputBuffer or Get(i) to access the characters. - str->TryFlattenIfNotFlat(); int end = length; if ( (length == -1) || (length > str->length() - start) ) end = str->length() - start; if (end < 0) return 0; - write_input_buffer.Reset(start, *str); - int i; - for (i = 0; i < end; i++) - buffer[i] = write_input_buffer.GetNext(); - if (length == -1 || i < length) - buffer[i] = '\0'; - return i; + i::String::WriteToFlat(*str, buffer, start, end); + if (length == -1 || end < length) + buffer[end] = '\0'; + return end; } @@ -2577,9 +2602,11 @@ bool v8::V8::Dispose() { } -bool v8::V8::IdleNotification(bool is_high_priority) { - if (!i::V8::IsRunning()) return false; - return i::V8::IdleNotification(is_high_priority); +bool v8::V8::IdleNotification() { + // Returning true tells the caller that it need not + // continue to call IdleNotification. 
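The GetRealNamedProperty() implementation just above pairs with the declaration added to v8.h earlier in this patch: the lookup sees only real properties and never invokes interceptors. A hedged usage sketch; `obj` is assumed to be a v8::Handle<v8::Object> already in scope and the property name is illustrative:

    // Look up "name" as a real property, ignoring named interceptors on
    // the object and everywhere in its prototype chain.
    v8::HandleScope scope;
    v8::Local<v8::Value> value =
        obj->GetRealNamedProperty(v8::String::New("name"));
    if (value.IsEmpty()) {
      // No real property exists; only an interceptor (or nothing)
      // would answer for "name".
    }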
+ if (!i::V8::IsRunning()) return true; + return i::V8::IdleNotification(); } @@ -2740,7 +2767,9 @@ v8::Local Context::GetCurrent() { v8::Local Context::GetCalling() { if (IsDeadCheck("v8::Context::GetCalling()")) return Local(); - i::Handle context(i::Top::GetCallingGlobalContext()); + i::Handle calling = i::Top::GetCallingGlobalContext(); + if (calling.is_null()) return Local(); + i::Handle context = i::Handle::cast(calling); return Utils::ToLocal(context); } @@ -3187,7 +3216,7 @@ Local v8::Integer::New(int32_t value) { void V8::IgnoreOutOfMemoryException() { - thread_local.SetIgnoreOutOfMemory(true); + thread_local.set_ignore_out_of_memory(true); } @@ -3669,6 +3698,11 @@ HandleScopeImplementer* HandleScopeImplementer::instance() { } +void HandleScopeImplementer::FreeThreadResources() { + thread_local.Free(); +} + + char* HandleScopeImplementer::ArchiveThread(char* storage) { return thread_local.ArchiveThreadHelper(storage); } @@ -3680,7 +3714,7 @@ char* HandleScopeImplementer::ArchiveThreadHelper(char* storage) { handle_scope_data_ = *current; memcpy(storage, this, sizeof(*this)); - Initialize(); + ResetAfterArchive(); current->Initialize(); return storage + ArchiveSpacePerThread(); @@ -3706,14 +3740,14 @@ char* HandleScopeImplementer::RestoreThreadHelper(char* storage) { void HandleScopeImplementer::IterateThis(ObjectVisitor* v) { // Iterate over all handles in the blocks except for the last. - for (int i = Blocks()->length() - 2; i >= 0; --i) { - Object** block = Blocks()->at(i); + for (int i = blocks()->length() - 2; i >= 0; --i) { + Object** block = blocks()->at(i); v->VisitPointers(block, &block[kHandleBlockSize]); } // Iterate over live handles in the last block (if any). - if (!Blocks()->is_empty()) { - v->VisitPointers(Blocks()->last(), handle_scope_data_.next); + if (!blocks()->is_empty()) { + v->VisitPointers(blocks()->last(), handle_scope_data_.next); } if (!saved_contexts_.is_empty()) { diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index 9ae6307b4d..1221f352cc 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -311,20 +311,12 @@ class HandleScopeImplementer { public: HandleScopeImplementer() - : blocks(0), + : blocks_(0), entered_contexts_(0), - saved_contexts_(0) { - Initialize(); - } - - void Initialize() { - blocks.Initialize(0); - entered_contexts_.Initialize(0); - saved_contexts_.Initialize(0); - spare = NULL; - ignore_out_of_memory = false; - call_depth = 0; - } + saved_contexts_(0), + spare_(NULL), + ignore_out_of_memory_(false), + call_depth_(0) { } static HandleScopeImplementer* instance(); @@ -332,6 +324,7 @@ class HandleScopeImplementer { static int ArchiveSpacePerThread(); static char* RestoreThread(char* from); static char* ArchiveThread(char* to); + static void FreeThreadResources(); // Garbage collection support. 
static void Iterate(v8::internal::ObjectVisitor* v); @@ -341,9 +334,9 @@ inline internal::Object** GetSpareOrNewBlock(); inline void DeleteExtensions(int extensions); - inline void IncrementCallDepth() {call_depth++;} - inline void DecrementCallDepth() {call_depth--;} - inline bool CallDepthIsZero() { return call_depth == 0; } + inline void IncrementCallDepth() {call_depth_++;} + inline void DecrementCallDepth() {call_depth_--;} + inline bool CallDepthIsZero() { return call_depth_ == 0; } inline void EnterContext(Handle<Object> context); inline bool LeaveLastContext(); @@ -356,20 +349,44 @@ inline Context* RestoreContext(); inline bool HasSavedContexts(); - inline List<internal::Object**>* Blocks() { return &blocks; } - - inline bool IgnoreOutOfMemory() { return ignore_out_of_memory; } - inline void SetIgnoreOutOfMemory(bool value) { ignore_out_of_memory = value; } + inline List<internal::Object**>* blocks() { return &blocks_; } + inline bool ignore_out_of_memory() { return ignore_out_of_memory_; } + inline void set_ignore_out_of_memory(bool value) { + ignore_out_of_memory_ = value; + } private: - List<internal::Object**> blocks; - Object** spare; - int call_depth; + void ResetAfterArchive() { + blocks_.Initialize(0); + entered_contexts_.Initialize(0); + saved_contexts_.Initialize(0); + spare_ = NULL; + ignore_out_of_memory_ = false; + call_depth_ = 0; + } + + void Free() { + ASSERT(blocks_.length() == 0); + ASSERT(entered_contexts_.length() == 0); + ASSERT(saved_contexts_.length() == 0); + blocks_.Free(); + entered_contexts_.Free(); + saved_contexts_.Free(); + if (spare_ != NULL) { + DeleteArray(spare_); + spare_ = NULL; + } + ASSERT(call_depth_ == 0); + } + + List<internal::Object**> blocks_; // Used as a stack to keep track of entered contexts. List<Handle<Object> > entered_contexts_; // Used as a stack to keep track of saved contexts. List<Context*> saved_contexts_; - bool ignore_out_of_memory; + Object** spare_; + bool ignore_out_of_memory_; + int call_depth_; // This is only used for threading support. v8::ImplementationUtilities::HandleScopeData handle_scope_data_; @@ -419,32 +436,32 @@ Handle<Object> HandleScopeImplementer::LastEnteredContext() { // If there's a spare block, use it for growing the current scope. internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() { - internal::Object** block = (spare != NULL) ? - spare : + internal::Object** block = (spare_ != NULL) ?
+ spare_ : NewArray<internal::Object*>(kHandleBlockSize); - spare = NULL; + spare_ = NULL; return block; } void HandleScopeImplementer::DeleteExtensions(int extensions) { - if (spare != NULL) { - DeleteArray(spare); - spare = NULL; + if (spare_ != NULL) { + DeleteArray(spare_); + spare_ = NULL; } for (int i = extensions; i > 1; --i) { - internal::Object** block = blocks.RemoveLast(); + internal::Object** block = blocks_.RemoveLast(); #ifdef DEBUG v8::ImplementationUtilities::ZapHandleRange(block, &block[kHandleBlockSize]); #endif DeleteArray(block); } - spare = blocks.RemoveLast(); + spare_ = blocks_.RemoveLast(); #ifdef DEBUG v8::ImplementationUtilities::ZapHandleRange( - spare, - &spare[kHandleBlockSize]); + spare_, + &spare_[kHandleBlockSize]); #endif } diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h index 80f90063ba..d2f1bfce54 100644 --- a/deps/v8/src/arguments.h +++ b/deps/v8/src/arguments.h @@ -45,6 +45,9 @@ namespace internal { class Arguments BASE_EMBEDDED { public: + Arguments(int length, Object** arguments) + : length_(length), arguments_(arguments) { } + Object*& operator[] (int index) { ASSERT(0 <= index && index < length_); return arguments_[-index]; @@ -61,11 +64,34 @@ class Arguments BASE_EMBEDDED { // Get the total number of arguments including the receiver. int length() const { return length_; } + Object** arguments() { return arguments_; } + private: int length_; Object** arguments_; }; + +// Custom arguments replicate a small segment of stack that can be +// accessed through an Arguments object the same way the actual stack +// can. +class CustomArguments : public Relocatable { + public: + inline CustomArguments(Object* data, + JSObject* self, + JSObject* holder) { + values_[3] = self; + values_[2] = holder; + values_[1] = Smi::FromInt(0); + values_[0] = data; + } + void IterateInstance(ObjectVisitor* v); + Object** end() { return values_ + 3; } + private: + Object* values_[4]; +}; + + } } // namespace v8::internal #endif // V8_ARGUMENTS_H_ diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index cd5a1bbfd7..5417ed7d36 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -81,7 +81,13 @@ void RelocInfo::set_target_address(Address target) { Object* RelocInfo::target_object() { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - return reinterpret_cast<Object*>(Assembler::target_address_at(pc_)); + return Memory::Object_at(Assembler::target_address_address_at(pc_)); } + + +Handle<Object> RelocInfo::target_object_handle(Assembler* origin) { + ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); + return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_)); } diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 7e43f2e5da..d1df08c571 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -645,8 +645,8 @@ class Assembler : public Malloced { str(src, MemOperand(sp, 4, NegPreIndex), cond); } - void pop(Register dst) { - ldr(dst, MemOperand(sp, 4, PostIndex), al); + void pop(Register dst, Condition cond = al) { + ldr(dst, MemOperand(sp, 4, PostIndex), cond); } void pop() { diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index cdea1cbf6d..d7afb37af1 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -44,15 +44,379 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) { __ str(r1, MemOperand(ip, 0)); // The actual argument count has already been
loaded into register - // r0, but JumpToBuiltin expects r0 to contain the number of + // r0, but JumpToRuntime expects r0 to contain the number of // arguments including the receiver. __ add(r0, r0, Operand(1)); - __ JumpToBuiltin(ExternalReference(id)); + __ JumpToRuntime(ExternalReference(id)); +} + + +// Load the built-in Array function from the current context. +static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) { + // Load the global context. + + __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ ldr(result, + FieldMemOperand(result, GlobalObject::kGlobalContextOffset)); + // Load the Array function from the global context. + __ ldr(result, + MemOperand(result, + Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); +} + + +// This constant has the same value as JSArray::kPreallocatedArrayElements and +// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding +// below should be reconsidered. +static const int kLoopUnfoldLimit = 4; + + +// Allocate an empty JSArray. The allocated array is put into the result +// register. An elements backing store is allocated with size initial_capacity +// and filled with the hole values. +static void AllocateEmptyJSArray(MacroAssembler* masm, + Register array_function, + Register result, + Register scratch1, + Register scratch2, + Register scratch3, + int initial_capacity, + Label* gc_required) { + ASSERT(initial_capacity > 0); + // Load the initial map from the array function. + __ ldr(scratch1, FieldMemOperand(array_function, + JSFunction::kPrototypeOrInitialMapOffset)); + + // Allocate the JSArray object together with space for a fixed array with the + // requested elements. + int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity); + __ AllocateInNewSpace(size / kPointerSize, + result, + scratch2, + scratch3, + gc_required, + TAG_OBJECT); + + // Allocated the JSArray. Now initialize the fields except for the elements + // array. + // result: JSObject + // scratch1: initial map + // scratch2: start of next object + __ str(scratch1, FieldMemOperand(result, JSObject::kMapOffset)); + __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); + __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset)); + // Field JSArray::kElementsOffset is initialized later. + __ mov(scratch3, Operand(0)); + __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset)); + + // Calculate the location of the elements array and set elements array member + // of the JSArray. + // result: JSObject + // scratch2: start of next object + __ lea(scratch1, MemOperand(result, JSArray::kSize)); + __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset)); + + // Clear the heap tag on the elements array. + __ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask)); + + // Initialize the FixedArray and fill it with holes. FixedArray length is not + // stored as a smi. + // result: JSObject + // scratch1: elements array (untagged) + // scratch2: start of next object + __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex); + ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset); + __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex)); + __ mov(scratch3, Operand(initial_capacity)); + ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); + __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex)); + + // Fill the FixedArray with the hole value. 
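As a plain-arithmetic cross-check of the allocation at the top of AllocateEmptyJSArray: the stub reserves the JSArray header plus a FixedArray sized for initial_capacity, and AllocateInNewSpace receives that size in words, hence the division by kPointerSize. A host-side sketch; the concrete constant values are assumptions for a 32-bit ARM layout, spelled out only for illustration:

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;                 // assumed: 32-bit ARM
      const int kJSArraySize = 4 * kPointerSize;  // assumed: map, properties,
                                                  // elements, length
      const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map, length
      const int initial_capacity = 4;  // JSArray::kPreallocatedArrayElements

      // size = JSArray::kSize + FixedArray::SizeFor(initial_capacity)
      int size = kJSArraySize + kFixedArrayHeaderSize +
                 initial_capacity * kPointerSize;
      std::printf("%d bytes, %d words\n", size, size / kPointerSize);
      return 0;
    }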
+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); + ASSERT(initial_capacity <= kLoopUnfoldLimit); + __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex); + for (int i = 0; i < initial_capacity; i++) { + __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex)); + } +} + +// Allocate a JSArray with the number of elements stored in a register. The +// register array_function holds the built-in Array function and the register +// array_size holds the size of the array as a smi. The allocated array is put +// into the result register and beginning and end of the FixedArray elements +// storage is put into registers elements_array_storage and elements_array_end +// (see below for when that is not the case). If the parameter fill_with_holes +// is true the allocated elements backing store is filled with the hole values +// otherwise it is left uninitialized. When the backing store is filled the +// register elements_array_storage is scratched. +static void AllocateJSArray(MacroAssembler* masm, + Register array_function, // Array function. + Register array_size, // As a smi. + Register result, + Register elements_array_storage, + Register elements_array_end, + Register scratch1, + Register scratch2, + bool fill_with_hole, + Label* gc_required) { + Label not_empty, allocated; + + // Load the initial map from the array function. + __ ldr(elements_array_storage, + FieldMemOperand(array_function, + JSFunction::kPrototypeOrInitialMapOffset)); + + // Check whether an empty sized array is requested. + __ tst(array_size, array_size); + __ b(nz, ¬_empty); + + // If an empty array is requested allocate a small elements array anyway. This + // keeps the code below free of special casing for the empty array. + int size = JSArray::kSize + + FixedArray::SizeFor(JSArray::kPreallocatedArrayElements); + __ AllocateInNewSpace(size / kPointerSize, + result, + elements_array_end, + scratch1, + gc_required, + TAG_OBJECT); + __ jmp(&allocated); + + // Allocate the JSArray object together with space for a FixedArray with the + // requested number of elements. + __ bind(¬_empty); + ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + __ mov(elements_array_end, + Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize)); + __ add(elements_array_end, + elements_array_end, + Operand(array_size, ASR, kSmiTagSize)); + __ AllocateInNewSpace(elements_array_end, + result, + scratch1, + scratch2, + gc_required, + TAG_OBJECT); + + // Allocated the JSArray. Now initialize the fields except for the elements + // array. + // result: JSObject + // elements_array_storage: initial map + // array_size: size of array (smi) + __ bind(&allocated); + __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset)); + __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex); + __ str(elements_array_storage, + FieldMemOperand(result, JSArray::kPropertiesOffset)); + // Field JSArray::kElementsOffset is initialized later. + __ str(array_size, FieldMemOperand(result, JSArray::kLengthOffset)); + + // Calculate the location of the elements array and set elements array member + // of the JSArray. + // result: JSObject + // array_size: size of array (smi) + __ add(elements_array_storage, result, Operand(JSArray::kSize)); + __ str(elements_array_storage, + FieldMemOperand(result, JSArray::kElementsOffset)); + + // Clear the heap tag on the elements array. + __ and_(elements_array_storage, + elements_array_storage, + Operand(~kHeapObjectTagMask)); + // Initialize the fixed array and fill it with holes. 
FixedArray length is not + // stored as a smi. + // result: JSObject + // elements_array_storage: elements array (untagged) + // array_size: size of array (smi) + ASSERT(kSmiTag == 0); + __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex); + ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset); + __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex)); + // Convert array_size from smi to value. + __ mov(array_size, + Operand(array_size, ASR, kSmiTagSize)); + __ tst(array_size, array_size); + // Length of the FixedArray is the number of pre-allocated elements if + // the actual JSArray has length 0 and the size of the JSArray for non-empty + // JSArrays. The length of a FixedArray is not stored as a smi. + __ mov(array_size, Operand(JSArray::kPreallocatedArrayElements), LeaveCC, eq); + ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); + __ str(array_size, + MemOperand(elements_array_storage, kPointerSize, PostIndex)); + + // Calculate elements array and elements array end. + // result: JSObject + // elements_array_storage: elements array element storage + // array_size: size of elements array + __ add(elements_array_end, + elements_array_storage, + Operand(array_size, LSL, kPointerSizeLog2)); + + // Fill the allocated FixedArray with the hole value if requested. + // result: JSObject + // elements_array_storage: elements array element storage + // elements_array_end: start of next object + if (fill_with_hole) { + Label loop, entry; + __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex); + __ jmp(&entry); + __ bind(&loop); + __ str(scratch1, + MemOperand(elements_array_storage, kPointerSize, PostIndex)); + __ bind(&entry); + __ cmp(elements_array_storage, elements_array_end); + __ b(lt, &loop); + } +} + +// Create a new array for the built-in Array function. This function allocates +// the JSArray object and the FixedArray elements array and initializes these. +// If the Array cannot be constructed in native code the runtime is called. This +// function assumes the following state: +// r0: argc +// r1: constructor (built-in Array function) +// lr: return address +// sp[0]: last argument +// This function is used for both construct and normal calls of Array. The only +// difference between handling a construct call and a normal call is that for a +// construct call the constructor function in r1 needs to be preserved for +// entering the generic code. In both cases argc in r0 needs to be preserved. +// Both registers are preserved by this code so no need to differentiate between +// construct call and normal call. +static void ArrayNativeCode(MacroAssembler* masm, + Label *call_generic_code) { + Label argc_one_or_more, argc_two_or_more; + + // Check for array construction with zero arguments or one. + __ cmp(r0, Operand(0)); + __ b(ne, &argc_one_or_more); + + // Handle construction of an empty array. + AllocateEmptyJSArray(masm, + r1, + r2, + r3, + r4, + r5, + JSArray::kPreallocatedArrayElements, + call_generic_code); + __ IncrementCounter(&Counters::array_function_native, 1, r3, r4); + // Setup return value, remove receiver from stack and return. + __ mov(r0, r2); + __ add(sp, sp, Operand(kPointerSize)); + __ Jump(lr); + + // Check for one argument. Bail out if argument is not smi or if it is + // negative. + __ bind(&argc_one_or_more); + __ cmp(r0, Operand(1)); + __ b(ne, &argc_two_or_more); + ASSERT(kSmiTag == 0); + __ ldr(r2, MemOperand(sp)); // Get the argument from the stack. 
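The masking sequence that follows is V8's standard smi test: with kSmiTag == 0 and kSmiTagSize == 1 (both asserted in this file), a word is a non-negative smi exactly when its sign bit and its low tag bit are both clear. A sketch of the predicate in C++, not V8 code:

    #include <cstdint>

    // Mirrors and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC)
    // followed by the branch on a non-zero result.
    inline bool IsNonNegativeSmi(std::uintptr_t bits) {
      const std::uintptr_t kSmiTagMask = 1;  // low bit is 0 for a smi
      const std::uintptr_t kIntptrSignBit =
          static_cast<std::uintptr_t>(1) << (8 * sizeof(std::uintptr_t) - 1);
      return (bits & (kIntptrSignBit | kSmiTagMask)) == 0;
    }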
+ __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC); + __ b(ne, call_generic_code); + + // Handle construction of an empty array of a certain size. Bail out if size + // is too large to actually allocate an elements array. + ASSERT(kSmiTag == 0); + __ cmp(r2, Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize)); + __ b(ge, call_generic_code); + + // r0: argc + // r1: constructor + // r2: array_size (smi) + // sp[0]: argument + AllocateJSArray(masm, + r1, + r2, + r3, + r4, + r5, + r6, + r7, + true, + call_generic_code); + __ IncrementCounter(&Counters::array_function_native, 1, r2, r4); + // Setup return value, remove receiver and argument from stack and return. + __ mov(r0, r3); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Jump(lr); + + // Handle construction of an array from a list of arguments. + __ bind(&argc_two_or_more); + __ mov(r2, Operand(r0, LSL, kSmiTagSize)); // Convert argc to a smi. + + // r0: argc + // r1: constructor + // r2: array_size (smi) + // sp[0]: last argument + AllocateJSArray(masm, + r1, + r2, + r3, + r4, + r5, + r6, + r7, + false, + call_generic_code); + __ IncrementCounter(&Counters::array_function_native, 1, r2, r6); + + // Fill arguments as array elements. Copy from the top of the stack (last + // element) to the array backing store filling it backwards. Note: + // elements_array_end points after the backing store, therefore PreIndex is + // used when filling the backing store. + // r0: argc + // r3: JSArray + // r4: elements_array storage start (untagged) + // r5: elements_array_end (untagged) + // sp[0]: last argument + Label loop, entry; + __ jmp(&entry); + __ bind(&loop); + __ ldr(r2, MemOperand(sp, kPointerSize, PostIndex)); + __ str(r2, MemOperand(r5, -kPointerSize, PreIndex)); + __ bind(&entry); + __ cmp(r4, r5); + __ b(lt, &loop); + + // Remove caller arguments and receiver from the stack, setup return value and + // return. + // r0: argc + // r3: JSArray + // sp[0]: receiver + __ add(sp, sp, Operand(kPointerSize)); + __ mov(r0, r3); + __ Jump(lr); } void Builtins::Generate_ArrayCode(MacroAssembler* masm) { - // Just jump to the generic array code. + // ----------- S t a t e ------------- + // -- r0 : number of arguments + // -- lr : return address + // -- sp[...]: constructor arguments + // ----------------------------------- + Label generic_array_code, one_or_more_arguments, two_or_more_arguments; + + // Get the Array function. + GenerateLoadArrayFunction(masm, r1); + + if (FLAG_debug_code) { + // Initial map for the builtin Array function should be a map. + __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); + __ tst(r2, Operand(kSmiTagMask)); + __ Assert(ne, "Unexpected initial map for Array function"); + __ CompareObjectType(r2, r3, r4, MAP_TYPE); + __ Assert(eq, "Unexpected initial map for Array function"); + } + + // Run the native code for the Array function called as a normal function. + ArrayNativeCode(masm, &generic_array_code); + + // Jump to the generic array code if the specialized code cannot handle + // the construction. + __ bind(&generic_array_code); Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric); Handle<Code> array_code(code); __ Jump(array_code, RelocInfo::CODE_TARGET); @@ -60,7 +424,34 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { - // Just jump to the generic construct code.
+ // ----------- S t a t e ------------- + // -- r0 : number of arguments + // -- r1 : constructor function + // -- lr : return address + // -- sp[...]: constructor arguments + // ----------------------------------- + Label generic_constructor; + + if (FLAG_debug_code) { + // The array construct code is only set for the builtin Array function which + // always has a map. + GenerateLoadArrayFunction(masm, r2); + __ cmp(r1, r2); + __ Assert(eq, "Unexpected Array function"); + // Initial map for the builtin Array function should be a map. + __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); + __ tst(r2, Operand(kSmiTagMask)); + __ Assert(ne, "Unexpected initial map for Array function"); + __ CompareObjectType(r2, r3, r4, MAP_TYPE); + __ Assert(eq, "Unexpected initial map for Array function"); + } + + // Run the native code for the Array function called as a constructor. + ArrayNativeCode(masm, &generic_constructor); + + // Jump to the generic construct code in case the specialized code cannot + // handle the construction. + __ bind(&generic_constructor); Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric); Handle<Code> generic_construct_stub(code); __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); @@ -149,7 +540,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // r2: initial map // r7: undefined __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); - __ AllocateObjectInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS); + __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS); // Allocated the JSObject, now initialize the fields. Map is set to initial // map and properties and elements are set to empty fixed array. @@ -220,12 +611,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // r5: start of next object // r7: undefined __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize)); - __ AllocateObjectInNewSpace(r0, - r5, - r6, - r2, - &undo_allocation, - RESULT_CONTAINS_TOP); + __ AllocateInNewSpace(r0, + r5, + r6, + r2, + &undo_allocation, + RESULT_CONTAINS_TOP); // Initialize the FixedArray. // r1: constructor diff --git a/deps/v8/src/arm/cfg-arm.cc b/deps/v8/src/arm/cfg-arm.cc deleted file mode 100644 index e0e563cd87..0000000000 --- a/deps/v8/src/arm/cfg-arm.cc +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "cfg.h" -#include "codegen-inl.h" -#include "codegen-arm.h" // Include after codegen-inl.h. -#include "macro-assembler-arm.h" - -namespace v8 { -namespace internal { - -#define __ ACCESS_MASM(masm) - -void InstructionBlock::Compile(MacroAssembler* masm) { - ASSERT(!is_marked()); - is_marked_ = true; - { - Comment cmt(masm, "[ InstructionBlock"); - for (int i = 0, len = instructions_.length(); i < len; i++) { - // If the location of the current instruction is a temp, then the - // instruction cannot be in tail position in the block. Allocate the - // temp based on peeking ahead to the next instruction. - Instruction* instr = instructions_[i]; - Location* loc = instr->location(); - if (loc->is_temporary()) { - instructions_[i+1]->FastAllocate(TempLocation::cast(loc)); - } - instructions_[i]->Compile(masm); - } - } - successor_->Compile(masm); -} - - -void EntryNode::Compile(MacroAssembler* masm) { - ASSERT(!is_marked()); - is_marked_ = true; - { - Comment cmnt(masm, "[ EntryNode"); - __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); - __ add(fp, sp, Operand(2 * kPointerSize)); - int count = CfgGlobals::current()->fun()->scope()->num_stack_slots(); - if (count > 0) { - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - for (int i = 0; i < count; i++) { - __ push(ip); - } - } - if (FLAG_trace) { - __ CallRuntime(Runtime::kTraceEnter, 0); - } - if (FLAG_check_stack) { - StackCheckStub stub; - __ CallStub(&stub); - } - } - successor_->Compile(masm); -} - - -void ExitNode::Compile(MacroAssembler* masm) { - ASSERT(!is_marked()); - is_marked_ = true; - Comment cmnt(masm, "[ ExitNode"); - if (FLAG_trace) { - __ push(r0); - __ CallRuntime(Runtime::kTraceExit, 1); - } - __ mov(sp, fp); - __ ldm(ia_w, sp, fp.bit() | lr.bit()); - int count = CfgGlobals::current()->fun()->scope()->num_parameters(); - __ add(sp, sp, Operand((count + 1) * kPointerSize)); - __ Jump(lr); -} - - -void PropLoadInstr::Compile(MacroAssembler* masm) { - // The key should not be on the stack---if it is a compiler-generated - // temporary it is in the accumulator. - ASSERT(!key()->is_on_stack()); - - Comment cmnt(masm, "[ Load from Property"); - // If the key is known at compile-time we may be able to use a load IC. - bool is_keyed_load = true; - if (key()->is_constant()) { - // Still use the keyed load IC if the key can be parsed as an integer so - // we will get into the case that handles [] on string objects. - Handle key_val = Constant::cast(key())->handle(); - uint32_t ignored; - if (key_val->IsSymbol() && - !String::cast(*key_val)->AsArrayIndex(&ignored)) { - is_keyed_load = false; - } - } - - if (!object()->is_on_stack()) object()->Push(masm); - - if (is_keyed_load) { - key()->Push(masm); - Handle ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); - __ Call(ic, RelocInfo::CODE_TARGET); - // Discard key and receiver. 
- __ add(sp, sp, Operand(2 * kPointerSize)); - } else { - key()->Get(masm, r2); - Handle ic(Builtins::builtin(Builtins::LoadIC_Initialize)); - __ Call(ic, RelocInfo::CODE_TARGET); - __ pop(); // Discard receiver. - } - location()->Set(masm, r0); -} - - -void BinaryOpInstr::Compile(MacroAssembler* masm) { - // The right-hand value should not be on the stack---if it is a - // compiler-generated temporary it is in the accumulator. - ASSERT(!right()->is_on_stack()); - - Comment cmnt(masm, "[ BinaryOpInstr"); - // We can overwrite one of the operands if it is a temporary. - OverwriteMode mode = NO_OVERWRITE; - if (left()->is_temporary()) { - mode = OVERWRITE_LEFT; - } else if (right()->is_temporary()) { - mode = OVERWRITE_RIGHT; - } - - // Move left to r1 and right to r0. - left()->Get(masm, r1); - right()->Get(masm, r0); - GenericBinaryOpStub stub(op(), mode); - __ CallStub(&stub); - location()->Set(masm, r0); -} - - -void ReturnInstr::Compile(MacroAssembler* masm) { - // The location should be 'Effect'. As a side effect, move the value to - // the accumulator. - Comment cmnt(masm, "[ ReturnInstr"); - value()->Get(masm, r0); -} - - -void Constant::Get(MacroAssembler* masm, Register reg) { - __ mov(reg, Operand(handle_)); -} - - -void Constant::Push(MacroAssembler* masm) { - __ mov(ip, Operand(handle_)); - __ push(ip); -} - - -static MemOperand ToMemOperand(SlotLocation* loc) { - switch (loc->type()) { - case Slot::PARAMETER: { - int count = CfgGlobals::current()->fun()->scope()->num_parameters(); - return MemOperand(fp, (1 + count - loc->index()) * kPointerSize); - } - case Slot::LOCAL: { - const int kOffset = JavaScriptFrameConstants::kLocal0Offset; - return MemOperand(fp, kOffset - loc->index() * kPointerSize); - } - default: - UNREACHABLE(); - return MemOperand(r0); - } -} - - -void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) { - __ mov(ip, Operand(handle_)); - __ str(ip, ToMemOperand(loc)); -} - - -void SlotLocation::Get(MacroAssembler* masm, Register reg) { - __ ldr(reg, ToMemOperand(this)); -} - - -void SlotLocation::Set(MacroAssembler* masm, Register reg) { - __ str(reg, ToMemOperand(this)); -} - - -void SlotLocation::Push(MacroAssembler* masm) { - __ ldr(ip, ToMemOperand(this)); - __ push(ip); // Push will not destroy ip. -} - - -void SlotLocation::Move(MacroAssembler* masm, Value* value) { - // Double dispatch. 
- value->MoveToSlot(masm, this); -} - - -void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) { - __ ldr(ip, ToMemOperand(this)); - __ str(ip, ToMemOperand(loc)); -} - - -void TempLocation::Get(MacroAssembler* masm, Register reg) { - switch (where_) { - case ACCUMULATOR: - if (!reg.is(r0)) __ mov(reg, r0); - break; - case STACK: - __ pop(reg); - break; - case NOT_ALLOCATED: - UNREACHABLE(); - } -} - - -void TempLocation::Set(MacroAssembler* masm, Register reg) { - switch (where_) { - case ACCUMULATOR: - if (!reg.is(r0)) __ mov(r0, reg); - break; - case STACK: - __ push(reg); - break; - case NOT_ALLOCATED: - UNREACHABLE(); - } -} - - -void TempLocation::Push(MacroAssembler* masm) { - switch (where_) { - case ACCUMULATOR: - __ push(r0); - break; - case STACK: - case NOT_ALLOCATED: - UNREACHABLE(); - } -} - - -void TempLocation::Move(MacroAssembler* masm, Value* value) { - switch (where_) { - case ACCUMULATOR: - value->Get(masm, r0); - case STACK: - value->Push(masm); - break; - case NOT_ALLOCATED: - UNREACHABLE(); - } -} - - -void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) { - switch (where_) { - case ACCUMULATOR: - __ str(r0, ToMemOperand(loc)); - case STACK: - __ pop(ip); - __ str(ip, ToMemOperand(loc)); - break; - case NOT_ALLOCATED: - UNREACHABLE(); - } -} - -#undef __ - -} } // namespace v8::internal diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 477ea0519b..cdd32f30f8 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -1188,7 +1188,6 @@ void CodeGenerator::VisitDeclaration(Declaration* node) { #endif VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ Declaration"); - CodeForStatementPosition(node); Variable* var = node->proxy()->var(); ASSERT(var != NULL); // must have been resolved Slot* slot = var->slot(); @@ -2811,7 +2810,6 @@ void CodeGenerator::VisitAssignment(Assignment* node) { #endif VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ Assignment"); - CodeForStatementPosition(node); { Reference target(this, node->target()); if (target.is_illegal()) { @@ -2909,13 +2907,11 @@ void CodeGenerator::VisitCall(Call* node) { VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ Call"); + Expression* function = node->expression(); ZoneList* args = node->arguments(); - CodeForStatementPosition(node); // Standard function call. - // Check if the function is a variable or a property. - Expression* function = node->expression(); Variable* var = function->AsVariableProxy()->AsVariable(); Property* property = function->AsProperty(); @@ -2928,7 +2924,56 @@ void CodeGenerator::VisitCall(Call* node) { // is resolved in cache misses (this also holds for megamorphic calls). // ------------------------------------------------------------------------ - if (var != NULL && !var->is_this() && var->is_global()) { + if (var != NULL && var->is_possibly_eval()) { + // ---------------------------------- + // JavaScript example: 'eval(arg)' // eval is not known to be shadowed + // ---------------------------------- + + // In a call to eval, we first call %ResolvePossiblyDirectEval to + // resolve the function we need to call and the receiver of the + // call. Then we call the resolved function using the given + // arguments. + // Prepare stack for call to resolved function. 
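The eval call sequence that follows builds the stack and then fixes it up: %ResolvePossiblyDirectEval hands back the resolved function and receiver, and the code overwrites the two slots pushed before the arguments. A host-side model in which a plain C array stands in for the expression stack (a sketch, not V8 code):

    // sp[arg_count + 1] held the unresolved function and sp[arg_count]
    // the placeholder receiver; both are replaced by the resolved pair.
    void TouchUpEvalCall(void** sp, int arg_count,
                         void* resolved_function, void* resolved_receiver) {
      sp[arg_count + 1] = resolved_function;
      sp[arg_count] = resolved_receiver;
    }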
+ LoadAndSpill(function); + __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); + frame_->EmitPush(r2); // Slot for receiver + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + LoadAndSpill(args->at(i)); + } + + // Prepare stack for call to ResolvePossiblyDirectEval. + __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize)); + frame_->EmitPush(r1); + if (arg_count > 0) { + __ ldr(r1, MemOperand(sp, arg_count * kPointerSize)); + frame_->EmitPush(r1); + } else { + frame_->EmitPush(r2); + } + + // Resolve the call. + frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2); + + // Touch up stack with the right values for the function and the receiver. + __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize)); + __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); + __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize)); + __ str(r1, MemOperand(sp, arg_count * kPointerSize)); + + // Call the function. + CodeForSourcePosition(node->position()); + + InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; + CallFunctionStub call_function(arg_count, in_loop); + frame_->CallStub(&call_function, arg_count + 1); + + __ ldr(cp, frame_->Context()); + // Remove the function from the stack. + frame_->Drop(); + frame_->EmitPush(r0); + + } else if (var != NULL && !var->is_this() && var->is_global()) { // ---------------------------------- // JavaScript example: 'foo(1, 2, 3)' // foo is global // ---------------------------------- @@ -3053,72 +3098,12 @@ void CodeGenerator::VisitCall(Call* node) { } -void CodeGenerator::VisitCallEval(CallEval* node) { -#ifdef DEBUG - int original_height = frame_->height(); -#endif - VirtualFrame::SpilledScope spilled_scope; - Comment cmnt(masm_, "[ CallEval"); - - // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve - // the function we need to call and the receiver of the call. - // Then we call the resolved function using the given arguments. - - ZoneList* args = node->arguments(); - Expression* function = node->expression(); - - CodeForStatementPosition(node); - - // Prepare stack for call to resolved function. - LoadAndSpill(function); - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - frame_->EmitPush(r2); // Slot for receiver - int arg_count = args->length(); - for (int i = 0; i < arg_count; i++) { - LoadAndSpill(args->at(i)); - } - - // Prepare stack for call to ResolvePossiblyDirectEval. - __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize)); - frame_->EmitPush(r1); - if (arg_count > 0) { - __ ldr(r1, MemOperand(sp, arg_count * kPointerSize)); - frame_->EmitPush(r1); - } else { - frame_->EmitPush(r2); - } - - // Resolve the call. - frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2); - - // Touch up stack with the right values for the function and the receiver. - __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize)); - __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); - __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize)); - __ str(r1, MemOperand(sp, arg_count * kPointerSize)); - - // Call the function. - CodeForSourcePosition(node->position()); - - InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; - CallFunctionStub call_function(arg_count, in_loop); - frame_->CallStub(&call_function, arg_count + 1); - - __ ldr(cp, frame_->Context()); - // Remove the function from the stack. 
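The block above inlines what used to be VisitCallEval into VisitCall: push the (possibly shadowing) function, an undefined receiver slot, and the arguments, call %ResolvePossiblyDirectEval, then overwrite the function and receiver slots in place with the returned pair. A host-language sketch of that fix-up protocol (a std::vector stands in for the VM stack and ResolveEval for the runtime call; neither is V8 API):

    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    struct Value { std::string tag; };

    // Stand-in for %ResolvePossiblyDirectEval: given the value in the
    // function slot and the first argument, return the function to call
    // and the receiver to use.
    std::pair<Value, Value> ResolveEval(const Value& fun, const Value& arg0) {
      return std::make_pair(Value{"fn<" + fun.tag + ">"},
                            Value{"receiver-for:" + arg0.tag});
    }

    int main() {
      std::vector<Value> stack;            // back() is the top of stack
      stack.push_back(Value{"eval"});      // function slot
      stack.push_back(Value{"undefined"}); // receiver slot
      stack.push_back(Value{"arg0"});      // the arguments
      const size_t arg_count = 1;

      // Touch up the stack with the right function and receiver, leaving
      // the already-pushed arguments exactly where they are.
      size_t fun_slot = stack.size() - arg_count - 2;
      std::pair<Value, Value> r = ResolveEval(stack[fun_slot], stack.back());
      stack[fun_slot] = r.first;
      stack[fun_slot + 1] = r.second;

      for (size_t i = 0; i < stack.size(); i++)
        printf("slot %zu: %s\n", i, stack[i].tag.c_str());
      return 0;
    }

Because the slots are rewritten rather than repushed, the argument layout the call stub expects is already in place when CallFunctionStub runs.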
- frame_->Drop(); - frame_->EmitPush(r0); - ASSERT(frame_->height() == original_height + 1); -} - - void CodeGenerator::VisitCallNew(CallNew* node) { #ifdef DEBUG int original_height = frame_->height(); #endif VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ CallNew"); - CodeForStatementPosition(node); // According to ECMA-262, section 11.2.2, page 44, the function // expression in new calls must be evaluated before the @@ -4960,12 +4945,12 @@ static void AllocateHeapNumber( Register scratch2) { // Another scratch register. // Allocate an object in the heap for the heap number and tag it as a heap // object. - __ AllocateObjectInNewSpace(HeapNumber::kSize / kPointerSize, - result, - scratch1, - scratch2, - need_gc, - TAG_OBJECT); + __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize, + result, + scratch1, + scratch2, + need_gc, + TAG_OBJECT); // Get heap number map and store it in the allocated object. __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex); @@ -5076,11 +5061,14 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, // r5: Address of heap number for result. __ push(lr); // For later. __ push(r5); // Address of heap number that is answer. + __ AlignStack(0); // Call C routine that may not cause GC or other trouble. __ mov(r5, Operand(ExternalReference::double_fp_operation(operation))); __ Call(r5); + __ pop(r4); // Address of heap number. + __ cmp(r4, Operand(Smi::FromInt(0))); + __ pop(r4, eq); // Conditional pop instruction to get rid of alignment push. // Store answer in the overwritable heap number. - __ pop(r4); #if !defined(USE_ARM_EABI) // Double returned in fp coprocessor register 0 and 1, encoded as register // cr8. Offsets must be divisible by 4 for coprocessor so we need to diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index b28e96594f..1eb0932eb6 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -370,7 +370,7 @@ class CodeGenerator: public AstVisitor { // information. void CodeForFunctionPosition(FunctionLiteral* fun); void CodeForReturnPosition(FunctionLiteral* fun); - void CodeForStatementPosition(AstNode* node); + void CodeForStatementPosition(Statement* node); void CodeForSourcePosition(int pos); #ifdef DEBUG diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 6dd9b8faab..45c6540eeb 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -291,27 +291,8 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) { // Align the stack at this point. After this point we have 5 pushes, // so in fact we have to unalign here! See also the assert on the - // alignment immediately below. -#if defined(V8_HOST_ARCH_ARM) - // Running on the real platform. Use the alignment as mandated by the local - // environment. - // Note: This will break if we ever start generating snapshots on one ARM - // platform for another ARM platform with a different alignment. - int activation_frame_alignment = OS::ActivationFrameAlignment(); -#else // defined(V8_HOST_ARCH_ARM) - // If we are using the simulator then we should always align to the expected - // alignment. As the simulator is used to generate snapshots we do not know - // if the target platform will need alignment, so we will always align at - // this point here. 
- int activation_frame_alignment = 2 * kPointerSize; -#endif // defined(V8_HOST_ARCH_ARM) - if (activation_frame_alignment != kPointerSize) { - // This code needs to be made more general if this assert doesn't hold. - ASSERT(activation_frame_alignment == 2 * kPointerSize); - mov(r7, Operand(Smi::FromInt(0))); - tst(sp, Operand(activation_frame_alignment - 1)); - push(r7, eq); // Conditional push instruction. - } + // alignment in AlignStack. + AlignStack(1); // Push in reverse order: caller_fp, sp_on_exit, and caller_pc. stm(db_w, sp, fp.bit() | ip.bit() | lr.bit()); @@ -343,6 +324,30 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) { } +void MacroAssembler::AlignStack(int offset) { +#if defined(V8_HOST_ARCH_ARM) + // Running on the real platform. Use the alignment as mandated by the local + // environment. + // Note: This will break if we ever start generating snapshots on one ARM + // platform for another ARM platform with a different alignment. + int activation_frame_alignment = OS::ActivationFrameAlignment(); +#else // defined(V8_HOST_ARCH_ARM) + // If we are using the simulator then we should always align to the expected + // alignment. As the simulator is used to generate snapshots we do not know + // if the target platform will need alignment, so we will always align at + // this point here. + int activation_frame_alignment = 2 * kPointerSize; +#endif // defined(V8_HOST_ARCH_ARM) + if (activation_frame_alignment != kPointerSize) { + // This code needs to be made more general if this assert doesn't hold. + ASSERT(activation_frame_alignment == 2 * kPointerSize); + mov(r7, Operand(Smi::FromInt(0))); + tst(sp, Operand(activation_frame_alignment - offset)); + push(r7, eq); // Conditional push instruction. + } +} + + void MacroAssembler::LeaveExitFrame(StackFrame::Type type) { #ifdef ENABLE_DEBUGGER_SUPPORT // Restore the memory copy of the registers by digging them out from @@ -763,12 +768,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, } -void MacroAssembler::AllocateObjectInNewSpace(int object_size, - Register result, - Register scratch1, - Register scratch2, - Label* gc_required, - AllocationFlags flags) { +void MacroAssembler::AllocateInNewSpace(int object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags) { ASSERT(!result.is(scratch1)); ASSERT(!scratch1.is(scratch2)); @@ -813,12 +818,12 @@ void MacroAssembler::AllocateObjectInNewSpace(int object_size, } -void MacroAssembler::AllocateObjectInNewSpace(Register object_size, - Register result, - Register scratch1, - Register scratch2, - Label* gc_required, - AllocationFlags flags) { +void MacroAssembler::AllocateInNewSpace(Register object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags) { ASSERT(!result.is(scratch1)); ASSERT(!scratch1.is(scratch2)); @@ -1001,11 +1006,11 @@ void MacroAssembler::TailCallRuntime(const ExternalReference& ext, // should remove this need and make the runtime routine entry code // smarter. mov(r0, Operand(num_arguments)); - JumpToBuiltin(ext); + JumpToRuntime(ext); } -void MacroAssembler::JumpToBuiltin(const ExternalReference& builtin) { +void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) { #if defined(__thumb__) // Thumb mode builtin. 
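AlignStack, factored out above, keeps sp 8-byte aligned by conditionally pushing a Smi zero; the caller in HandleBinaryOpSlowCases undoes it with a compare and a conditional pop, relying on the fact that no real entry at that position can equal the marker. A rough analogue of the offset-0 case over a word stack (4-byte words and 8-byte alignment assumed; kMarker plays the role of Smi::FromInt(0)):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // The role of Smi::FromInt(0): a value that can never collide with the
    // real entries (here, nonzero pointers) sitting on the stack.
    const uintptr_t kMarker = 0;

    // Push a marker word iff the depth is odd, so the word count stays even
    // (8-byte alignment with 4-byte words), like AlignStack's conditional push.
    void AlignStack(std::vector<uintptr_t>& stack) {
      if (stack.size() % 2 != 0) stack.push_back(kMarker);
    }

    // Pop the marker iff it is present -- mirrors the cmp + pop(r4, eq) pair.
    void UnalignStack(std::vector<uintptr_t>& stack) {
      if (!stack.empty() && stack.back() == kMarker) stack.pop_back();
    }

    int main() {
      std::vector<uintptr_t> stack = {0x1000, 0x2000, 0x3000};  // odd depth
      AlignStack(stack);                  // marker pushed, depth now 4
      printf("aligned depth:  %zu\n", stack.size());
      UnalignStack(stack);                // marker found and removed
      printf("restored depth: %zu\n", stack.size());
      return 0;
    }
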
ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1); @@ -1046,7 +1051,6 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, int argc = Builtins::GetArgumentsCount(id); uint32_t flags = Bootstrapper::FixupFlagsArgumentsCount::encode(argc) | - Bootstrapper::FixupFlagsIsPCRelative::encode(true) | Bootstrapper::FixupFlagsUseCodeObject::encode(false); Unresolved entry = { pc_offset() - kInstrSize, flags, name }; unresolved_.Add(entry); @@ -1064,7 +1068,6 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { int argc = Builtins::GetArgumentsCount(id); uint32_t flags = Bootstrapper::FixupFlagsArgumentsCount::encode(argc) | - Bootstrapper::FixupFlagsIsPCRelative::encode(true) | Bootstrapper::FixupFlagsUseCodeObject::encode(true); Unresolved entry = { pc_offset() - kInstrSize, flags, name }; unresolved_.Add(entry); diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 03aa4d0c2b..ee9d70d310 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -96,6 +96,8 @@ class MacroAssembler: public Assembler { // Leave the current exit frame. Expects the return value in r0. void LeaveExitFrame(StackFrame::Type type); + // Align the stack by optionally pushing a Smi zero. + void AlignStack(int offset); // --------------------------------------------------------------------------- // JavaScript invokes @@ -171,18 +173,18 @@ class MacroAssembler: public Assembler { // bytes). If the new space is exhausted control continues at the gc_required // label. The allocated object is returned in result. If the flag // tag_allocated_object is true the result is tagged as a heap object. - void AllocateObjectInNewSpace(int object_size, - Register result, - Register scratch1, - Register scratch2, - Label* gc_required, - AllocationFlags flags); - void AllocateObjectInNewSpace(Register object_size, - Register result, - Register scratch1, - Register scratch2, - Label* gc_required, - AllocationFlags flags); + void AllocateInNewSpace(int object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags); + void AllocateInNewSpace(Register object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags); // Undo allocation in new space. The object passed and objects allocated after // it will no longer be allocated. The caller must make sure that no pointers @@ -257,14 +259,14 @@ class MacroAssembler: public Assembler { void CallRuntime(Runtime::FunctionId fid, int num_arguments); // Tail call of a runtime routine (jump). - // Like JumpToBuiltin, but also takes care of passing the number + // Like JumpToRuntime, but also takes care of passing the number // of parameters. void TailCallRuntime(const ExternalReference& ext, int num_arguments, int result_size); - // Jump to the builtin routine. - void JumpToBuiltin(const ExternalReference& builtin); + // Jump to a runtime routine. + void JumpToRuntime(const ExternalReference& builtin); // Invoke specified builtin JavaScript function. Adds an entry to // the unresolved list if the name does not resolve. @@ -329,8 +331,16 @@ class MacroAssembler: public Assembler { Label* done, InvokeFlag flag); - // Get the code for the given builtin. Returns if able to resolve - // the function in the 'resolved' flag. + // Prepares for a call or jump to a builtin by doing two things: + // 1.
Emits code that fetches the builtin's function object from the context + // at runtime, and puts it in the register rdi. + // 2. Fetches the builtin's code object, and returns it in a handle, at + // compile time, so that later code can emit instructions to jump or call + // the builtin directly. If the code object has not yet been created, it + // returns the builtin code object for IllegalFunction, and sets the + // output parameter "resolved" to false. Code that uses the return value + // should then add the address and the builtin name to the list of fixups + // called unresolved_, which is fixed up by the bootstrapper. Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved); // Activation support. diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 70dfcd2a9d..22bec82201 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -409,7 +409,7 @@ void Simulator::Initialize() { Simulator::Simulator() { - ASSERT(initialized_); + Initialize(); // Setup simulator support first. Some of this information is needed to // setup the architecture state. size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack @@ -501,6 +501,7 @@ void* Simulator::RedirectExternalReference(void* external_function, // Get the active Simulator for the current thread. Simulator* Simulator::current() { + Initialize(); Simulator* sim = reinterpret_cast<Simulator*>( v8::internal::Thread::GetThreadLocal(simulator_key)); if (sim == NULL) { diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index 3917d6a5af..ff6bbf4302 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -36,18 +36,23 @@ #ifndef V8_ARM_SIMULATOR_ARM_H_ #define V8_ARM_SIMULATOR_ARM_H_ +#include "allocation.h" + #if defined(__arm__) // When running without a simulator we call the entry directly. #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \ (entry(p0, p1, p2, p3, p4)) -// Calculated the stack limit beyond which we will throw stack overflow errors. -// This macro must be called from a C++ method. It relies on being able to take -// the address of "this" to get a value on the current execution stack and then -// calculates the stack limit based on that value. -#define GENERATED_CODE_STACK_LIMIT(limit) \ - (reinterpret_cast<uintptr_t>(this) - limit) +// The stack limit beyond which we will throw stack overflow errors in +// generated code. Because generated code on arm uses the C stack, we +// just use the C stack limit. +class SimulatorStack : public v8::internal::AllStatic { + public: + static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) { + return c_limit; + } +}; // Call the generated regexp code directly. The entry function pointer should @@ -64,12 +69,6 @@ assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \ p0, p1, p2, p3, p4)) -// The simulator has its own stack. Thus it has a different stack limit from -// the C-based native code. -#define GENERATED_CODE_STACK_LIMIT(limit) \ - (assembler::arm::Simulator::current()->StackLimit()) - - #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ assembler::arm::Simulator::current()->Call( \ FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6) @@ -219,6 +218,20 @@ class Simulator { } } // namespace assembler::arm +
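SimulatorStack gives stack-overflow checks a single entry point: the native variant above passes the C stack limit straight through, while the simulator variant that follows substitutes the limit of its own private stack and ignores the input. A sketch of that dispatch (FakeSimulator and its limit value are invented for illustration):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the simulator's private stack: generated code runs on it,
    // so overflow checks must use its limit, not the C stack's.
    struct FakeSimulator {
      uintptr_t stack_limit;  // low end of the simulator stack
    };
    FakeSimulator* current_simulator = nullptr;  // null => running natively

    uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
      // Native: JS code shares the C stack, so the C limit is the JS limit.
      if (current_simulator == nullptr) return c_limit;
      // Simulated: the input is ignored in favor of the simulator's limit.
      return current_simulator->stack_limit;
    }

    int main() {
      printf("native:    %#lx\n", (unsigned long)JsLimitFromCLimit(0x1000));
      FakeSimulator sim = {0xbeef0000};
      current_simulator = &sim;
      printf("simulated: %#lx\n", (unsigned long)JsLimitFromCLimit(0x1000));
      return 0;
    }
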
+// The simulator has its own stack. Thus it has a different stack limit from +// the C-based native code. Setting the c_limit to indicate a very small +// stack will not cause stack overflow errors, since the simulator ignores +// the input. This is unlikely to be an issue in practice, though it might +// cause testing trouble down the line. +class SimulatorStack : public v8::internal::AllStatic { + public: + static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) { + return assembler::arm::Simulator::current()->StackLimit(); + } +}; + + #endif // defined(__arm__) #endif // V8_ARM_SIMULATOR_ARM_H_ diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 9e44cfa510..8282655f7a 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -1390,12 +1390,12 @@ Object* ConstructStubCompiler::CompileConstructStub( // r2: initial map // r7: undefined __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); - __ AllocateObjectInNewSpace(r3, - r4, - r5, - r6, - &generic_stub_call, - NO_ALLOCATION_FLAGS); + __ AllocateInNewSpace(r3, + r4, + r5, + r6, + &generic_stub_call, + NO_ALLOCATION_FLAGS); // Allocated the JSObject, now initialize the fields. Map is set to initial // map and properties and elements are set to empty fixed array. diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index eb69f97c18..f8e63d084b 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -709,6 +709,8 @@ function ArraySort(comparefn) { QuickSort(a, high_start, to); } + var length; + // Copies elements in the range 0..length from obj's prototype chain // to obj itself, if obj has holes. Returns one more than the maximal index // of a prototype property. @@ -826,7 +828,7 @@ function ArraySort(comparefn) { return first_undefined; } - var length = ToUint32(this.length); + length = ToUint32(this.length); if (length < 2) return this; var is_array = IS_ARRAY(this); diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index 827389a1b6..323e06aff5 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -191,6 +191,7 @@ class RelocInfo BASE_EMBEDDED { INLINE(Address target_address()); INLINE(void set_target_address(Address target)); INLINE(Object* target_object()); + INLINE(Handle<Object> target_object_handle(Assembler* origin)); INLINE(Object** target_object_address()); INLINE(void set_target_object(Object* target)); diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 2b6074200f..692bec01df 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -40,7 +40,6 @@ VariableProxySentinel VariableProxySentinel::identifier_proxy_(false); ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_; Property Property::this_property_(VariableProxySentinel::this_proxy(), NULL, 0); Call Call::sentinel_(NULL, NULL, 0); -CallEval CallEval::sentinel_(NULL, NULL, 0); // ---------------------------------------------------------------------------- diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index ea83712137..6a1cdf51c6 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -85,7 +85,6 @@ namespace internal { V(Throw) \ V(Property) \ V(Call) \ - V(CallEval) \ V(CallNew) \ V(CallRuntime) \ V(UnaryOperation) \ @@ -116,7 +115,6 @@ typedef ZoneList<Handle<Object> > ZoneObjectList; class AstNode: public ZoneObject { public: - AstNode(): statement_pos_(RelocInfo::kNoPosition) { } virtual ~AstNode() { } virtual void Accept(AstVisitor* v) = 0; @@ -140,21 +138,23 @@ class AstNode: public ZoneObject { virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; } virtual ObjectLiteral* AsObjectLiteral() { return NULL; } virtual ArrayLiteral* AsArrayLiteral() { return NULL; } - - void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; } - int statement_pos() const { return statement_pos_; } - - private: - int statement_pos_; };
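The ast.h hunk here moves the statement position, and its RelocInfo::kNoPosition default, out of the AstNode base class and into Statement (continued below), so that expressions stop paying for a field only statements use. A minimal sketch of the resulting shape (toy classes, not the real hierarchy; the kNoPosition value is an assumption):

    // Minimal sketch of the refactor, not V8's real class hierarchy.
    static const int kNoPosition = -1;  // stands in for RelocInfo::kNoPosition

    class AstNode {                     // base class: no position field now
     public:
      virtual ~AstNode() {}
    };

    class Expression : public AstNode {};  // expressions stay lean

    class Statement : public AstNode {     // only statements carry the field
     public:
      Statement() : statement_pos_(kNoPosition) {}
      void set_statement_pos(int pos) { statement_pos_ = pos; }
      int statement_pos() const { return statement_pos_; }
     private:
      int statement_pos_;
    };

    int main() {
      Statement s;
      s.set_statement_pos(42);
      return s.statement_pos() == 42 ? 0 : 1;
    }
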
class Statement: public AstNode { public: + Statement() : statement_pos_(RelocInfo::kNoPosition) {} + virtual Statement* AsStatement() { return this; } virtual ReturnStatement* AsReturnStatement() { return NULL; } bool IsEmpty() { return AsEmptyStatement() != NULL; } + + void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; } + int statement_pos() const { return statement_pos_; } + + private: + int statement_pos_; }; @@ -954,12 +954,8 @@ class Property: public Expression { class Call: public Expression { public: - Call(Expression* expression, - ZoneList<Expression*>* arguments, - int pos) - : expression_(expression), - arguments_(arguments), - pos_(pos) { } + Call(Expression* expression, ZoneList<Expression*>* arguments, int pos) + : expression_(expression), arguments_(arguments), pos_(pos) { } virtual void Accept(AstVisitor* v); @@ -981,30 +977,21 @@ }; -class CallNew: public Call { +class CallNew: public Expression { public: CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos) - : Call(expression, arguments, pos) { } - - virtual void Accept(AstVisitor* v); -}; - - -// The CallEval class represents a call of the form 'eval(...)' where eval -// cannot be seen to be overwritten at compile time. It is potentially a -// direct (i.e. not aliased) eval call. The real nature of the call is -// determined at runtime. -class CallEval: public Call { - public: - CallEval(Expression* expression, ZoneList<Expression*>* arguments, int pos) - : Call(expression, arguments, pos) { } + : expression_(expression), arguments_(arguments), pos_(pos) { } virtual void Accept(AstVisitor* v); - static CallEval* sentinel() { return &sentinel_; } + Expression* expression() const { return expression_; } + ZoneList<Expression*>* arguments() const { return arguments_; } + int position() { return pos_; } private: - static CallEval sentinel_; + Expression* expression_; + ZoneList<Expression*>* arguments_; + int pos_; }; diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 5f38485ed7..43aa1a3b89 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -201,20 +201,13 @@ bool PendingFixups::Process(Handle<JSBuiltinsObject> builtins) { } Code* code = Code::cast(code_[i]); Address pc = code->instruction_start() + pc_[i]; - bool is_pc_relative = Bootstrapper::FixupFlagsIsPCRelative::decode(flags); + RelocInfo target(pc, RelocInfo::CODE_TARGET, 0); bool use_code_object = Bootstrapper::FixupFlagsUseCodeObject::decode(flags);
- if (use_code_object) { - if (is_pc_relative) { - Assembler::set_target_address_at( - pc, reinterpret_cast<Address>(f->code())); - } else { - *reinterpret_cast<Object**>(pc) = f->code(); - } + target.set_target_object(f->code()); } else { - Assembler::set_target_address_at(pc, f->code()->instruction_start()); + target.set_target_address(f->code()->instruction_start()); } - LOG(StringEvent("resolved", name)); } Clear(); @@ -1586,6 +1579,12 @@ char* Bootstrapper::RestoreState(char* from) { } +// Called when the top-level V8 mutex is destroyed. +void Bootstrapper::FreeThreadResources() { + ASSERT(Genesis::current() == NULL); +} + + // Reserve space for statics needing saving and restoring. int Genesis::ArchiveSpacePerThread() { return sizeof(current_); diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h index 0d743e388f..15fc88dc06 100644 --- a/deps/v8/src/bootstrapper.h +++ b/deps/v8/src/bootstrapper.h @@ -66,14 +66,14 @@ class Bootstrapper : public AllStatic { static bool IsActive(); // Encoding/decoding support for fixup flags. - class FixupFlagsIsPCRelative: public BitField<bool, 0, 1> {}; - class FixupFlagsUseCodeObject: public BitField<bool, 1, 1> {}; - class FixupFlagsArgumentsCount: public BitField<uint32_t, 2, 32-2> {}; + class FixupFlagsUseCodeObject: public BitField<bool, 0, 1> {}; + class FixupFlagsArgumentsCount: public BitField<uint32_t, 1, 32-1> {}; // Support for thread preemption. static int ArchiveSpacePerThread(); static char* ArchiveState(char* to); static char* RestoreState(char* from); + static void FreeThreadResources(); }; }} // namespace v8::internal diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 5fe4ba9a02..afb54275e6 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -172,7 +172,9 @@ BUILTIN(ArrayCodeGeneric) { } // Optimize the case where there are no parameters passed. - if (args.length() == 1) return array->Initialize(4); + if (args.length() == 1) { + return array->Initialize(JSArray::kPreallocatedArrayElements); + } // Take the arguments as elements. int number_of_elements = args.length() - 1; diff --git a/deps/v8/src/cfg.cc b/deps/v8/src/cfg.cc deleted file mode 100644 index d2dff522b5..0000000000 --- a/deps/v8/src/cfg.cc +++ /dev/null @@ -1,763 +0,0 @@ -// Copyright 2009 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED.
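The bootstrapper.h hunk above repacks the fixup flags once IsPCRelative is gone: the use-code-object bool moves to bit 0 and the argument count takes the remaining bits of the word. A self-contained sketch of the BitField encode/decode scheme (the template below is a simplified re-implementation for illustration, not V8's utils.h definition):

    #include <cassert>
    #include <cstdint>

    // Simplified BitField: a value of type T occupying `size` bits starting
    // at bit `shift` of a uint32_t flags word.
    template <class T, int shift, int size>
    class BitField {
     public:
      static const uint32_t kMask = ((1U << size) - 1U) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t flags) {
        return static_cast<T>((flags & kMask) >> shift);
      }
    };

    // The post-patch layout: bool at bit 0, argument count in bits 1..31.
    class FixupFlagsUseCodeObject : public BitField<bool, 0, 1> {};
    class FixupFlagsArgumentsCount : public BitField<uint32_t, 1, 31> {};

    int main() {
      uint32_t flags = FixupFlagsArgumentsCount::encode(3) |
                       FixupFlagsUseCodeObject::encode(true);
      assert(FixupFlagsUseCodeObject::decode(flags) == true);
      assert(FixupFlagsArgumentsCount::decode(flags) == 3);
      return 0;
    }
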
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "v8.h" - -#include "bootstrapper.h" -#include "cfg.h" -#include "scopeinfo.h" -#include "scopes.h" - -namespace v8 { -namespace internal { - - -CfgGlobals* CfgGlobals::top_ = NULL; - - -CfgGlobals::CfgGlobals(FunctionLiteral* fun) - : global_fun_(fun), - global_exit_(new ExitNode()), - nowhere_(new Nowhere()), -#ifdef DEBUG - node_counter_(0), - temp_counter_(0), -#endif - previous_(top_) { - top_ = this; -} - - -#define BAILOUT(reason) \ - do { return NULL; } while (false) - -Cfg* Cfg::Build() { - FunctionLiteral* fun = CfgGlobals::current()->fun(); - if (fun->scope()->num_heap_slots() > 0) { - BAILOUT("function has context slots"); - } - if (fun->scope()->num_stack_slots() > kBitsPerPointer) { - BAILOUT("function has too many locals"); - } - if (fun->scope()->num_parameters() > kBitsPerPointer - 1) { - BAILOUT("function has too many parameters"); - } - if (fun->scope()->arguments() != NULL) { - BAILOUT("function uses .arguments"); - } - - ZoneList* body = fun->body(); - if (body->is_empty()) { - BAILOUT("empty function body"); - } - - StatementCfgBuilder builder; - builder.VisitStatements(body); - Cfg* graph = builder.graph(); - if (graph == NULL) { - BAILOUT("unsupported statement type"); - } - if (graph->is_empty()) { - BAILOUT("function body produces empty cfg"); - } - if (graph->has_exit()) { - BAILOUT("control path without explicit return"); - } - graph->PrependEntryNode(); - return graph; -} - -#undef BAILOUT - - -void Cfg::PrependEntryNode() { - ASSERT(!is_empty()); - entry_ = new EntryNode(InstructionBlock::cast(entry())); -} - - -void Cfg::Append(Instruction* instr) { - ASSERT(is_empty() || has_exit()); - if (is_empty()) { - entry_ = exit_ = new InstructionBlock(); - } - InstructionBlock::cast(exit_)->Append(instr); -} - - -void Cfg::AppendReturnInstruction(Value* value) { - Append(new ReturnInstr(value)); - ExitNode* global_exit = CfgGlobals::current()->exit(); - InstructionBlock::cast(exit_)->set_successor(global_exit); - exit_ = NULL; -} - - -void Cfg::Concatenate(Cfg* other) { - ASSERT(is_empty() || has_exit()); - if (other->is_empty()) return; - - if (is_empty()) { - entry_ = other->entry(); - exit_ = other->exit(); - } else { - // We have a pair of nonempty fragments and this has an available exit. - // Destructively glue the fragments together. - InstructionBlock* first = InstructionBlock::cast(exit_); - InstructionBlock* second = InstructionBlock::cast(other->entry()); - first->instructions()->AddAll(*second->instructions()); - if (second->successor() != NULL) { - first->set_successor(second->successor()); - exit_ = other->exit(); - } - } -} - - -void InstructionBlock::Unmark() { - if (is_marked_) { - is_marked_ = false; - successor_->Unmark(); - } -} - - -void EntryNode::Unmark() { - if (is_marked_) { - is_marked_ = false; - successor_->Unmark(); - } -} - - -void ExitNode::Unmark() { - is_marked_ = false; -} - - -Handle Cfg::Compile(Handle
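The deleted Cfg::Concatenate splices two code fragments destructively: when both are nonempty and the first still has an open exit, the second block's instructions are appended onto the first, which then takes over the second's successor and exit. A toy rendering of that splice (Fragment and its fields are invented stand-ins for InstructionBlock, not V8 types):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Toy stand-in for an instruction-block fragment: a list of instructions
    // plus an "exit is still open" flag, echoing is_empty()/has_exit().
    struct Fragment {
      std::vector<std::string> instructions;
      bool has_exit = true;  // false once a return has sealed the fragment

      bool is_empty() const { return instructions.empty(); }

      // Destructively glue `other` onto this fragment, as Concatenate does.
      void Concatenate(Fragment* other) {
        if (other->is_empty()) return;
        if (is_empty()) {
          instructions = other->instructions;
          has_exit = other->has_exit;
          return;
        }
        instructions.insert(instructions.end(),
                            other->instructions.begin(),
                            other->instructions.end());
        has_exit = other->has_exit;
      }
    };

    int main() {
      Fragment a, b;
      a.instructions = {"load x", "load y"};
      b.instructions = {"add", "return"};
      b.has_exit = false;  // b ends in an explicit return
      a.Concatenate(&b);
      for (const std::string& instr : a.instructions)
        printf("%s\n", instr.c_str());
      printf("open exit: %s\n", a.has_exit ? "yes" : "no");
      return 0;
    }
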