From 2b34363d03e0718c9e9f39982c723b806558c759 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Fri, 21 May 2010 09:41:50 -0700 Subject: [PATCH] Upgrade V8 to 2.2.11 --- deps/v8/ChangeLog | 28 +- deps/v8/SConstruct | 15 - deps/v8/benchmarks/README.txt | 8 + deps/v8/benchmarks/base.js | 2 +- deps/v8/benchmarks/raytrace.js | 31 - deps/v8/benchmarks/revisions.html | 7 + deps/v8/benchmarks/run.html | 6 +- deps/v8/benchmarks/splay.js | 11 +- deps/v8/include/v8-debug.h | 48 +- deps/v8/include/v8-profiler.h | 25 +- deps/v8/samples/shell.cc | 5 + deps/v8/src/SConscript | 5 - deps/v8/src/api.cc | 55 +- deps/v8/src/arm/assembler-arm-inl.h | 7 - deps/v8/src/arm/assembler-arm.cc | 321 ++- deps/v8/src/arm/assembler-arm.h | 29 +- deps/v8/src/arm/assembler-thumb2-inl.h | 263 --- deps/v8/src/arm/assembler-thumb2.cc | 1878 ----------------- deps/v8/src/arm/assembler-thumb2.h | 1036 --------- deps/v8/src/arm/builtins-arm.cc | 6 +- deps/v8/src/arm/codegen-arm.cc | 513 +++-- deps/v8/src/arm/codegen-arm.h | 16 + deps/v8/src/arm/constants-arm.cc | 4 + deps/v8/src/arm/cpu-arm.cc | 4 + deps/v8/src/arm/debug-arm.cc | 11 +- deps/v8/src/arm/disasm-arm.cc | 4 + deps/v8/src/arm/fast-codegen-arm.cc | 4 + deps/v8/src/arm/frames-arm.cc | 8 +- deps/v8/src/arm/full-codegen-arm.cc | 170 +- deps/v8/src/arm/ic-arm.cc | 401 ++-- deps/v8/src/arm/jump-target-arm.cc | 132 +- deps/v8/src/arm/macro-assembler-arm.cc | 4 + deps/v8/src/arm/regexp-macro-assembler-arm.cc | 5 + deps/v8/src/arm/register-allocator-arm.cc | 4 + deps/v8/src/arm/simulator-arm.cc | 4 + deps/v8/src/arm/stub-cache-arm.cc | 52 +- deps/v8/src/arm/virtual-frame-arm-inl.h | 53 + deps/v8/src/arm/virtual-frame-arm.cc | 122 +- deps/v8/src/arm/virtual-frame-arm.h | 106 +- deps/v8/src/assembler.cc | 3 - deps/v8/src/assembler.h | 8 +- deps/v8/src/ast-inl.h | 79 + deps/v8/src/ast.cc | 14 + deps/v8/src/ast.h | 42 +- deps/v8/src/bootstrapper.cc | 2 +- deps/v8/src/builtins.cc | 59 +- deps/v8/src/codegen.h | 3 +- deps/v8/src/compiler.cc | 16 +- deps/v8/src/cpu-profiler-inl.h | 2 +- deps/v8/src/cpu-profiler.cc | 47 +- deps/v8/src/cpu-profiler.h | 16 +- deps/v8/src/d8.js | 57 +- deps/v8/src/date.js | 11 +- deps/v8/src/debug-debugger.js | 31 + deps/v8/src/debug.cc | 72 +- deps/v8/src/debug.h | 24 +- deps/v8/src/flag-definitions.h | 17 +- deps/v8/src/full-codegen.cc | 35 +- deps/v8/src/full-codegen.h | 76 +- deps/v8/src/globals.h | 18 + deps/v8/src/heap.cc | 142 +- deps/v8/src/heap.h | 78 +- deps/v8/src/ia32/assembler-ia32-inl.h | 5 - deps/v8/src/ia32/assembler-ia32.cc | 40 +- deps/v8/src/ia32/assembler-ia32.h | 2 +- deps/v8/src/ia32/builtins-ia32.cc | 29 +- deps/v8/src/ia32/codegen-ia32.cc | 263 +-- deps/v8/src/ia32/codegen-ia32.h | 21 + deps/v8/src/ia32/cpu-ia32.cc | 4 + deps/v8/src/ia32/debug-ia32.cc | 4 + deps/v8/src/ia32/disasm-ia32.cc | 6 + deps/v8/src/ia32/fast-codegen-ia32.cc | 4 + deps/v8/src/ia32/frames-ia32.cc | 4 + deps/v8/src/ia32/full-codegen-ia32.cc | 1493 +++++++++++-- deps/v8/src/ia32/ic-ia32.cc | 6 +- deps/v8/src/ia32/jump-target-ia32.cc | 4 + deps/v8/src/ia32/macro-assembler-ia32.cc | 4 + .../src/ia32/regexp-macro-assembler-ia32.cc | 91 +- .../v8/src/ia32/regexp-macro-assembler-ia32.h | 3 +- deps/v8/src/ia32/register-allocator-ia32.cc | 4 + deps/v8/src/ia32/stub-cache-ia32.cc | 4 + deps/v8/src/ia32/virtual-frame-ia32.cc | 4 + deps/v8/src/ia32/virtual-frame-ia32.h | 40 +- deps/v8/src/jump-target-heavy.cc | 63 + deps/v8/src/jump-target-heavy.h | 242 +++ deps/v8/src/jump-target-light-inl.h | 14 +- deps/v8/src/jump-target-light.cc | 83 +- 
deps/v8/src/jump-target-light.h | 187 ++ deps/v8/src/jump-target.cc | 64 - deps/v8/src/jump-target.h | 218 +- deps/v8/src/liveedit.cc | 2 +- deps/v8/src/log.cc | 2 +- deps/v8/src/macro-assembler.h | 5 - deps/v8/src/macros.py | 5 + deps/v8/src/mark-compact.cc | 8 + deps/v8/src/mips/assembler-mips.cc | 4 + deps/v8/src/mips/builtins-mips.cc | 3 + deps/v8/src/mips/codegen-mips.cc | 4 + deps/v8/src/mips/constants-mips.cc | 5 + deps/v8/src/mips/cpu-mips.cc | 4 + deps/v8/src/mips/debug-mips.cc | 3 + deps/v8/src/mips/disasm-mips.cc | 3 + deps/v8/src/mips/fast-codegen-mips.cc | 3 + deps/v8/src/mips/frames-mips.cc | 3 + deps/v8/src/mips/full-codegen-mips.cc | 4 + deps/v8/src/mips/ic-mips.cc | 3 + deps/v8/src/mips/jump-target-mips.cc | 3 + deps/v8/src/mips/macro-assembler-mips.cc | 3 + deps/v8/src/mips/register-allocator-mips.cc | 3 + deps/v8/src/mips/simulator-mips.cc | 3 + deps/v8/src/mips/stub-cache-mips.cc | 3 + deps/v8/src/mips/virtual-frame-mips.cc | 3 + deps/v8/src/objects-inl.h | 12 +- deps/v8/src/objects.cc | 61 +- deps/v8/src/objects.h | 34 +- deps/v8/src/parser.cc | 5 +- deps/v8/src/platform-solaris.cc | 33 +- deps/v8/src/platform.h | 3 +- deps/v8/src/profile-generator-inl.h | 17 +- deps/v8/src/profile-generator.cc | 287 ++- deps/v8/src/profile-generator.h | 62 +- deps/v8/src/register-allocator.cc | 7 +- deps/v8/src/runtime.js | 15 +- deps/v8/src/serialize.cc | 484 +++-- deps/v8/src/serialize.h | 129 +- deps/v8/src/string.js | 193 +- deps/v8/src/third_party/dtoa/dtoa.c | 4 + deps/v8/src/v8natives.js | 21 +- deps/v8/src/version.cc | 2 +- deps/v8/src/virtual-frame-heavy-inl.h | 40 + deps/v8/src/virtual-frame-light-inl.h | 93 +- deps/v8/src/virtual-frame-light.cc | 3 + deps/v8/src/x64/assembler-x64.cc | 138 +- deps/v8/src/x64/assembler-x64.h | 11 +- deps/v8/src/x64/builtins-x64.cc | 5 + deps/v8/src/x64/codegen-x64.cc | 552 ++--- deps/v8/src/x64/codegen-x64.h | 29 +- deps/v8/src/x64/cpu-x64.cc | 4 + deps/v8/src/x64/debug-x64.cc | 10 +- deps/v8/src/x64/disasm-x64.cc | 5 + deps/v8/src/x64/fast-codegen-x64.cc | 4 + deps/v8/src/x64/frames-x64.cc | 4 + deps/v8/src/x64/full-codegen-x64.cc | 1510 +++++++++++-- deps/v8/src/x64/ic-x64.cc | 246 +-- deps/v8/src/x64/jump-target-x64.cc | 4 + deps/v8/src/x64/macro-assembler-x64.cc | 24 +- deps/v8/src/x64/macro-assembler-x64.h | 10 +- deps/v8/src/x64/regexp-macro-assembler-x64.cc | 104 +- deps/v8/src/x64/regexp-macro-assembler-x64.h | 3 +- deps/v8/src/x64/register-allocator-x64.cc | 4 + deps/v8/src/x64/stub-cache-x64.cc | 13 +- deps/v8/src/x64/virtual-frame-x64.cc | 144 +- deps/v8/src/x64/virtual-frame-x64.h | 69 +- deps/v8/test/cctest/test-cpu-profiler.cc | 6 +- deps/v8/test/cctest/test-debug.cc | 25 +- deps/v8/test/cctest/test-disasm-ia32.cc | 1 + deps/v8/test/cctest/test-log-stack-tracer.cc | 64 +- deps/v8/test/cctest/test-profile-generator.cc | 214 +- .../mjsunit/arguments-load-across-eval.js | 86 + deps/v8/test/mjsunit/array-concat.js | 79 +- deps/v8/test/mjsunit/array-pop.js | 28 + deps/v8/test/mjsunit/array-shift.js | 37 + deps/v8/test/mjsunit/array-slice.js | 47 + deps/v8/test/mjsunit/array-splice.js | 57 +- deps/v8/test/mjsunit/array-unshift.js | 79 +- deps/v8/test/mjsunit/compiler/assignment.js | 12 + deps/v8/test/mjsunit/mjsunit.js | 2 + deps/v8/test/mjsunit/mjsunit.status | 16 +- .../test/mjsunit/property-load-across-eval.js | 2 + deps/v8/test/mozilla/mozilla.status | 1 + deps/v8/tools/gc-nvp-trace-processor.py | 282 +++ deps/v8/tools/gyp/v8.gyp | 44 +- deps/v8/tools/v8.xcodeproj/project.pbxproj | 8 - deps/v8/tools/visual_studio/v8_base.vcproj | 
8 +
 .../v8/tools/visual_studio/v8_base_arm.vcproj |   12 +
 .../v8/tools/visual_studio/v8_base_x64.vcproj |    8 +
 176 files changed, 8628 insertions(+), 6128 deletions(-)
 delete mode 100644 deps/v8/src/arm/assembler-thumb2-inl.h
 delete mode 100644 deps/v8/src/arm/assembler-thumb2.cc
 delete mode 100644 deps/v8/src/arm/assembler-thumb2.h
 create mode 100644 deps/v8/src/arm/virtual-frame-arm-inl.h
 create mode 100644 deps/v8/src/ast-inl.h
 create mode 100644 deps/v8/src/jump-target-heavy.h
 create mode 100644 deps/v8/src/jump-target-light.h
 create mode 100644 deps/v8/test/mjsunit/arguments-load-across-eval.js
 create mode 100644 deps/v8/tools/gc-nvp-trace-processor.py

diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index c2d4e46a63..33b6d142f7 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,29 @@
+2010-05-21: Version 2.2.11
+
+        Fix crash bug in liveedit on 64 bit.
+
+        Use 'full compiler' when debugging is active. This should increase
+        the density of possible break points, making single step more fine
+        grained. This will only take effect for functions compiled after
+        debugging has been started, so recompilation of all functions is
+        required to get the full effect. IA32 and x64 only for now.
+
+        Misc. fixes to the Solaris build.
+
+        Add new flags --print-cumulative-gc-stat and --trace-gc-nvp.
+
+        Add filtering of CPU profiles by security context.
+
+        Fix crash bug on ARM when running without VFP2 or VFP3.
+
+        Incremental performance improvements in all backends.
+
+
+2010-05-17: Version 2.2.10
+
+        Performance improvements in the x64 and ARM backends.
+
+
 2010-05-10: Version 2.2.9
 
         Allow Object.create to be called with a function (issue 697).
@@ -6,7 +32,7 @@
         non date string (issue 696).
 
         Allow unaligned memory accesses on ARM targets that support it (by
-        Subrato K De of CodeAurora ).
+        Subrato K De of CodeAurora ).
 
         C++ API for retrieving JavaScript stack trace information.
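The "filtering of CPU profiles by security context" noted in the ChangeLog surfaces in include/v8-profiler.h as an optional security_token argument on GetProfile, FindProfile and StopProfiling (see the v8-profiler.h hunk further down). A minimal embedder-side sketch of how that argument might be used follows; it assumes Handle<String>/Handle<Value> parameter types (the template arguments are not legible in this patch text) and the made-up function name ProfileWithToken. It is illustration only, not part of the patch.

    // Hypothetical embedder code, not part of this patch: collect a CPU
    // profile and read it back filtered by the calling context's token.
    #include <v8.h>
    #include <v8-profiler.h>

    const v8::CpuProfile* ProfileWithToken(v8::Handle<v8::Context> context) {
      v8::HandleScope scope;
      v8::Context::Scope context_scope(context);

      v8::Handle<v8::String> title = v8::String::New("page-load");
      v8::CpuProfiler::StartProfiling(title);

      // ... run the JavaScript being measured ...

      // Collection records functions from every security context; the
      // token passed here only filters the profile that is returned.
      return v8::CpuProfiler::StopProfiling(title,
                                            context->GetSecurityToken());
    }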
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index 597d033459..7cf866ca55 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -210,12 +210,6 @@ LIBRARY_FLAGS = { 'CCFLAGS': ['-m32', '-DCAN_USE_UNALIGNED_ACCESSES=1'], 'LINKFLAGS': ['-m32'] }, - 'armvariant:thumb2': { - 'CPPDEFINES': ['V8_ARM_VARIANT_THUMB'] - }, - 'armvariant:arm': { - 'CPPDEFINES': ['V8_ARM_VARIANT_ARM'] - }, 'arch:mips': { 'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'], 'simulator:none': { @@ -764,11 +758,6 @@ SIMPLE_OPTIONS = { 'default': 'hidden', 'help': 'shared library symbol visibility' }, - 'armvariant': { - 'values': ['arm', 'thumb2', 'none'], - 'default': 'none', - 'help': 'generate thumb2 instructions instead of arm instructions (default)' - }, 'pgo': { 'values': ['off', 'instrument', 'optimize'], 'default': 'off', @@ -962,10 +951,6 @@ def PostprocessOptions(options, os): if 'msvcltcg' in ARGUMENTS: print "Warning: forcing msvcltcg on as it is required for pgo (%s)" % options['pgo'] options['msvcltcg'] = 'on' - if (options['armvariant'] == 'none' and options['arch'] == 'arm'): - options['armvariant'] = 'arm' - if (options['armvariant'] != 'none' and options['arch'] != 'arm'): - options['armvariant'] = 'none' if options['arch'] == 'mips': if ('regexp' in ARGUMENTS) and options['regexp'] == 'native': # Print a warning if native regexp is specified for mips diff --git a/deps/v8/benchmarks/README.txt b/deps/v8/benchmarks/README.txt index eb759cc92c..8e08159da8 100644 --- a/deps/v8/benchmarks/README.txt +++ b/deps/v8/benchmarks/README.txt @@ -61,3 +61,11 @@ Removed duplicate line in random seed code, and changed the name of the Object.prototype.inherits function in the DeltaBlue benchmark to inheritsFrom to avoid name clashes when running in Chromium with extensions enabled. + + +Changes from Version 5 to Version 6 +=================================== + +Removed dead code from the RayTrace benchmark and changed the Splay +benchmark to avoid converting the same numeric key to a string over +and over again. diff --git a/deps/v8/benchmarks/base.js b/deps/v8/benchmarks/base.js index 67cddd205e..ce308419ed 100644 --- a/deps/v8/benchmarks/base.js +++ b/deps/v8/benchmarks/base.js @@ -78,7 +78,7 @@ BenchmarkSuite.suites = []; // Scores are not comparable across versions. Bump the version if // you're making changes that will affect that scores, e.g. if you add // a new benchmark or change an existing one. -BenchmarkSuite.version = '5'; +BenchmarkSuite.version = '6 (candidate)'; // To make the benchmark results predictable, we replace Math.random diff --git a/deps/v8/benchmarks/raytrace.js b/deps/v8/benchmarks/raytrace.js index c68b0383a3..da4d5924aa 100644 --- a/deps/v8/benchmarks/raytrace.js +++ b/deps/v8/benchmarks/raytrace.js @@ -205,12 +205,6 @@ Flog.RayTracer.Light.prototype = { this.intensity = (intensity ? 
intensity : 10.0); }, - getIntensity: function(distance){ - if(distance >= intensity) return 0; - - return Math.pow((intensity - distance) / strength, 0.2); - }, - toString : function () { return 'Light [' + this.position.x + ',' + this.position.y + ',' + this.position.z + ']'; } @@ -420,31 +414,6 @@ if(typeof(Flog) == 'undefined') var Flog = {}; if(typeof(Flog.RayTracer) == 'undefined') Flog.RayTracer = {}; if(typeof(Flog.RayTracer.Shape) == 'undefined') Flog.RayTracer.Shape = {}; -Flog.RayTracer.Shape.BaseShape = Class.create(); - -Flog.RayTracer.Shape.BaseShape.prototype = { - position: null, - material: null, - - initialize : function() { - this.position = new Vector(0,0,0); - this.material = new Flog.RayTracer.Material.SolidMaterial( - new Flog.RayTracer.Color(1,0,1), - 0, - 0, - 0 - ); - }, - - toString : function () { - return 'Material [gloss=' + this.gloss + ', transparency=' + this.transparency + ', hasTexture=' + this.hasTexture +']'; - } -} -/* Fake a Flog.* namespace */ -if(typeof(Flog) == 'undefined') var Flog = {}; -if(typeof(Flog.RayTracer) == 'undefined') Flog.RayTracer = {}; -if(typeof(Flog.RayTracer.Shape) == 'undefined') Flog.RayTracer.Shape = {}; - Flog.RayTracer.Shape.Sphere = Class.create(); Flog.RayTracer.Shape.Sphere.prototype = { diff --git a/deps/v8/benchmarks/revisions.html b/deps/v8/benchmarks/revisions.html index 99d7be42b9..b03aa126d6 100644 --- a/deps/v8/benchmarks/revisions.html +++ b/deps/v8/benchmarks/revisions.html @@ -20,6 +20,13 @@ the benchmark suite.

+Version 6 (link)
+
+Removed dead code from the RayTrace benchmark and changed the Splay
+benchmark to avoid converting the same numeric key to a string over
+and over again.
+
 Version 5 (link)
 
Removed duplicate line in random seed code, and changed the name of diff --git a/deps/v8/benchmarks/run.html b/deps/v8/benchmarks/run.html index ef2c186412..30036b7843 100644 --- a/deps/v8/benchmarks/run.html +++ b/deps/v8/benchmarks/run.html @@ -111,12 +111,12 @@ higher scores means better performance: Bigger is better!

 Richards: OS kernel simulation benchmark, originally written in BCPL by Martin Richards (539 lines).
 DeltaBlue: One-way constraint solver, originally written in Smalltalk by John Maloney and Mario Wolczko (880 lines).
 Crypto: Encryption and decryption benchmark based on code by Tom Wu (1698 lines).
-RayTrace: Ray tracer benchmark based on code by Adam Burmister (935 lines).
-EarleyBoyer: Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (4685 lines).
+RayTrace: Ray tracer benchmark based on code by Adam Burmister (904 lines).
+EarleyBoyer: Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (4684 lines).
 RegExp: Regular expression benchmark generated by extracting regular expression operations from 50 of the most popular web pages (1614 lines).
-Splay: Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (378 lines).
+Splay: Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (379 lines).
  • diff --git a/deps/v8/benchmarks/splay.js b/deps/v8/benchmarks/splay.js index 53fc72793e..d8c8f04271 100644 --- a/deps/v8/benchmarks/splay.js +++ b/deps/v8/benchmarks/splay.js @@ -46,16 +46,16 @@ var kSplayTreePayloadDepth = 5; var splayTree = null; -function GeneratePayloadTree(depth, key) { +function GeneratePayloadTree(depth, tag) { if (depth == 0) { return { array : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], - string : 'String for key ' + key + ' in leaf node' + string : 'String for key ' + tag + ' in leaf node' }; } else { return { - left: GeneratePayloadTree(depth - 1, key), - right: GeneratePayloadTree(depth - 1, key) + left: GeneratePayloadTree(depth - 1, tag), + right: GeneratePayloadTree(depth - 1, tag) }; } } @@ -74,7 +74,8 @@ function InsertNewNode() { do { key = GenerateKey(); } while (splayTree.find(key) != null); - splayTree.insert(key, GeneratePayloadTree(kSplayTreePayloadDepth, key)); + var payload = GeneratePayloadTree(kSplayTreePayloadDepth, String(key)); + splayTree.insert(key, payload); return key; } diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h index f7b4fa12e3..c53b63462a 100644 --- a/deps/v8/include/v8-debug.h +++ b/deps/v8/include/v8-debug.h @@ -143,6 +143,39 @@ class EXPORT Debug { }; + /** + * An event details object passed to the debug event listener. + */ + class EventDetails { + public: + /** + * Event type. + */ + virtual DebugEvent GetEvent() const = 0; + + /** + * Access to execution state and event data of the debug event. Don't store + * these cross callbacks as their content becomes invalid. + */ + virtual Handle GetExecutionState() const = 0; + virtual Handle GetEventData() const = 0; + + /** + * Get the context active when the debug event happened. Note this is not + * the current active context as the JavaScript part of the debugger is + * running in it's own context which is entered at this point. + */ + virtual Handle GetEventContext() const = 0; + + /** + * Client data passed with the corresponding callbak whet it was registered. + */ + virtual Handle GetCallbackData() const = 0; + + virtual ~EventDetails() {} + }; + + /** * Debug event callback function. * @@ -157,6 +190,15 @@ class EXPORT Debug { Handle event_data, Handle data); + /** + * Debug event callback function. + * + * \param event_details object providing information about the debug event + * + * A EventCallback2 does not take possession of the event data, + * and must not rely on the data persisting after the handler returns. + */ + typedef void (*EventCallback2)(const EventDetails& event_details); /** * Debug message callback function. @@ -165,7 +207,7 @@ class EXPORT Debug { * \param length length of the message * \param client_data the data value passed when registering the message handler - * A MessageHandler does not take posession of the message string, + * A MessageHandler does not take possession of the message string, * and must not rely on the data persisting after the handler returns. * * This message handler is deprecated. Use MessageHandler2 instead. @@ -178,7 +220,7 @@ class EXPORT Debug { * * \param message the debug message handler message object - * A MessageHandler does not take posession of the message data, + * A MessageHandler does not take possession of the message data, * and must not rely on the data persisting after the handler returns. */ typedef void (*MessageHandler2)(const Message& message); @@ -196,6 +238,8 @@ class EXPORT Debug { // Set a C debug event listener. 
static bool SetDebugEventListener(EventCallback that, Handle data = Handle()); + static bool SetDebugEventListener2(EventCallback2 that, + Handle data = Handle()); // Set a JavaScript debug event listener. static bool SetDebugEventListener(v8::Handle that, diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h index f1b8ffbbdc..bb4107221c 100644 --- a/deps/v8/include/v8-profiler.h +++ b/deps/v8/include/v8-profiler.h @@ -139,6 +139,15 @@ class V8EXPORT CpuProfile { */ class V8EXPORT CpuProfiler { public: + /** + * A note on security tokens usage. As scripts from different + * origins can run inside a single V8 instance, it is possible to + * have functions from different security contexts intermixed in a + * single CPU profile. To avoid exposing function names belonging to + * other contexts, filtering by security token is performed while + * obtaining profiling results. + */ + /** * Returns the number of profiles collected (doesn't include * profiles that are being collected at the moment of call.) @@ -146,16 +155,22 @@ class V8EXPORT CpuProfiler { static int GetProfilesCount(); /** Returns a profile by index. */ - static const CpuProfile* GetProfile(int index); + static const CpuProfile* GetProfile( + int index, + Handle security_token = Handle()); /** Returns a profile by uid. */ - static const CpuProfile* FindProfile(unsigned uid); + static const CpuProfile* FindProfile( + unsigned uid, + Handle security_token = Handle()); /** * Starts collecting CPU profile. Title may be an empty string. It * is allowed to have several profiles being collected at * once. Attempts to start collecting several profiles with the same - * title are silently ignored. + * title are silently ignored. While collecting a profile, functions + * from all security contexts are included in it. The token-based + * filtering is only performed when querying for a profile. */ static void StartProfiling(Handle title); @@ -163,7 +178,9 @@ class V8EXPORT CpuProfiler { * Stops collecting CPU profile with a given title and returns it. * If the title given is empty, finishes the last profile started. 
*/ - static const CpuProfile* StopProfiling(Handle title); + static const CpuProfile* StopProfiling( + Handle title, + Handle security_token = Handle()); }; diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc index 27ed293e7e..1a13f5f80b 100644 --- a/deps/v8/samples/shell.cc +++ b/deps/v8/samples/shell.cc @@ -299,5 +299,10 @@ void ReportException(v8::TryCatch* try_catch) { printf("^"); } printf("\n"); + v8::String::Utf8Value stack_trace(try_catch->StackTrace()); + if (stack_trace.length() > 0) { + const char* stack_trace_string = ToCString(stack_trace); + printf("%s\n", stack_trace_string); + } } } diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index b68f6d1d23..8466a0c557 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -136,13 +136,8 @@ SOURCES = { arm/register-allocator-arm.cc arm/stub-cache-arm.cc arm/virtual-frame-arm.cc - """), - 'armvariant:arm': Split(""" arm/assembler-arm.cc """), - 'armvariant:thumb2': Split(""" - arm/assembler-thumb2.cc - """), 'arch:mips': Split(""" fast-codegen.cc mips/assembler-mips.cc diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index a4c38b72c2..cf940c6e00 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -48,7 +48,7 @@ #define LOG_API(expr) LOG(ApiEntryCall(expr)) -#ifdef ENABLE_HEAP_PROTECTION +#ifdef ENABLE_VMSTATE_TRACKING #define ENTER_V8 i::VMState __state__(i::OTHER) #define LEAVE_V8 i::VMState __state__(i::EXTERNAL) #else @@ -3992,10 +3992,40 @@ Local Exception::Error(v8::Handle raw_message) { // --- D e b u g S u p p o r t --- #ifdef ENABLE_DEBUGGER_SUPPORT + +static v8::Debug::EventCallback event_callback = NULL; + +static void EventCallbackWrapper(const v8::Debug::EventDetails& event_details) { + if (event_callback) { + event_callback(event_details.GetEvent(), + event_details.GetExecutionState(), + event_details.GetEventData(), + event_details.GetCallbackData()); + } +} + + bool Debug::SetDebugEventListener(EventCallback that, Handle data) { EnsureInitialized("v8::Debug::SetDebugEventListener()"); ON_BAILOUT("v8::Debug::SetDebugEventListener()", return false); ENTER_V8; + + event_callback = that; + + HandleScope scope; + i::Handle proxy = i::Factory::undefined_value(); + if (that != NULL) { + proxy = i::Factory::NewProxy(FUNCTION_ADDR(EventCallbackWrapper)); + } + i::Debugger::SetEventListener(proxy, Utils::OpenHandle(*data)); + return true; +} + + +bool Debug::SetDebugEventListener2(EventCallback2 that, Handle data) { + EnsureInitialized("v8::Debug::SetDebugEventListener2()"); + ON_BAILOUT("v8::Debug::SetDebugEventListener2()", return false); + ENTER_V8; HandleScope scope; i::Handle proxy = i::Factory::undefined_value(); if (that != NULL) { @@ -4250,15 +4280,23 @@ int CpuProfiler::GetProfilesCount() { } -const CpuProfile* CpuProfiler::GetProfile(int index) { +const CpuProfile* CpuProfiler::GetProfile(int index, + Handle security_token) { IsDeadCheck("v8::CpuProfiler::GetProfile"); - return reinterpret_cast(i::CpuProfiler::GetProfile(index)); + return reinterpret_cast( + i::CpuProfiler::GetProfile( + security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token), + index)); } -const CpuProfile* CpuProfiler::FindProfile(unsigned uid) { +const CpuProfile* CpuProfiler::FindProfile(unsigned uid, + Handle security_token) { IsDeadCheck("v8::CpuProfiler::FindProfile"); - return reinterpret_cast(i::CpuProfiler::FindProfile(uid)); + return reinterpret_cast( + i::CpuProfiler::FindProfile( + security_token.IsEmpty() ? 
NULL : *Utils::OpenHandle(*security_token), + uid)); } @@ -4268,10 +4306,13 @@ void CpuProfiler::StartProfiling(Handle title) { } -const CpuProfile* CpuProfiler::StopProfiling(Handle title) { +const CpuProfile* CpuProfiler::StopProfiling(Handle title, + Handle security_token) { IsDeadCheck("v8::CpuProfiler::StopProfiling"); return reinterpret_cast( - i::CpuProfiler::StopProfiling(*Utils::OpenHandle(*title))); + i::CpuProfiler::StopProfiling( + security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token), + *Utils::OpenHandle(*title))); } #endif // ENABLE_LOGGING_AND_PROFILING diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index 3f0854e333..a5c5bd1440 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -169,13 +169,6 @@ Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) { } -Operand::Operand(const char* s) { - rm_ = no_reg; - imm32_ = reinterpret_cast(s); - rmode_ = RelocInfo::EMBEDDED_STRING; -} - - Operand::Operand(const ExternalReference& f) { rm_ = no_reg; imm32_ = reinterpret_cast(f.address()); diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index f1f59ced7f..dba62e62c6 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -36,6 +36,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "arm/assembler-arm-inl.h" #include "serialize.h" @@ -106,6 +108,15 @@ void CpuFeatures::Probe() { const int RelocInfo::kApplyMask = 0; +bool RelocInfo::IsCodedSpecially() { + // The deserializer needs to know whether a pointer is specially coded. Being + // specially coded on ARM means that it is a movw/movt instruction. We don't + // generate those yet. + return false; +} + + + void RelocInfo::PatchCode(byte* instructions, int instruction_count) { // Patch the code at the current address with the supplied instructions. Instr* pc = reinterpret_cast(pc_); @@ -268,6 +279,20 @@ const Instr kBlxRegMask = 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4; const Instr kBlxRegPattern = B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4; +// A mask for the Rd register for push, pop, ldr, str instructions. +const Instr kRdMask = 0x0000f000; +static const int kRdShift = 12; +static const Instr kLdrRegFpOffsetPattern = + al | B26 | L | Offset | fp.code() * B16; +static const Instr kStrRegFpOffsetPattern = + al | B26 | Offset | fp.code() * B16; +static const Instr kLdrRegFpNegOffsetPattern = + al | B26 | L | NegOffset | fp.code() * B16; +static const Instr kStrRegFpNegOffsetPattern = + al | B26 | NegOffset | fp.code() * B16; +static const Instr kLdrStrInstrTypeMask = 0xffff0000; +static const Instr kLdrStrInstrArgumentMask = 0x0000ffff; +static const Instr kLdrStrOffsetMask = 0x00000fff; // Spare buffer. 
static const int kMinimalBufferSize = 4*KB; @@ -395,6 +420,43 @@ Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) { } +Register Assembler::GetRd(Instr instr) { + Register reg; + reg.code_ = ((instr & kRdMask) >> kRdShift); + return reg; +} + + +bool Assembler::IsPush(Instr instr) { + return ((instr & ~kRdMask) == kPushRegPattern); +} + + +bool Assembler::IsPop(Instr instr) { + return ((instr & ~kRdMask) == kPopRegPattern); +} + + +bool Assembler::IsStrRegFpOffset(Instr instr) { + return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern); +} + + +bool Assembler::IsLdrRegFpOffset(Instr instr) { + return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern); +} + + +bool Assembler::IsStrRegFpNegOffset(Instr instr) { + return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern); +} + + +bool Assembler::IsLdrRegFpNegOffset(Instr instr) { + return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern); +} + + // Labels refer to positions in the (to be) generated code. // There are bound, linked, and unused labels. // @@ -887,15 +949,12 @@ void Assembler::add(Register dst, Register src1, const Operand& src2, // str(src, MemOperand(sp, 4, NegPreIndex), al); // add(sp, sp, Operand(kPointerSize)); // Both instructions can be eliminated. - int pattern_size = 2 * kInstrSize; - if (FLAG_push_pop_elimination && - last_bound_pos_ <= (pc_offset() - pattern_size) && - reloc_info_writer.last_pc() <= (pc_ - pattern_size) && + if (can_peephole_optimize(2) && // Pattern. instr_at(pc_ - 1 * kInstrSize) == kPopInstruction && (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) { pc_ -= 2 * kInstrSize; - if (FLAG_print_push_pop_elimination) { + if (FLAG_print_peephole_optimization) { PrintF("%x push(reg)/pop() eliminated\n", pc_offset()); } } @@ -1086,20 +1145,170 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) { } addrmod2(cond | B26 | L, dst, src); - // Eliminate pattern: push(r), pop(r) - // str(r, MemOperand(sp, 4, NegPreIndex), al) - // ldr(r, MemOperand(sp, 4, PostIndex), al) - // Both instructions can be eliminated. - int pattern_size = 2 * kInstrSize; - if (FLAG_push_pop_elimination && - last_bound_pos_ <= (pc_offset() - pattern_size) && - reloc_info_writer.last_pc() <= (pc_ - pattern_size) && - // Pattern. - instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) && - instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) { - pc_ -= 2 * kInstrSize; - if (FLAG_print_push_pop_elimination) { - PrintF("%x push/pop (same reg) eliminated\n", pc_offset()); + // Eliminate pattern: push(ry), pop(rx) + // str(ry, MemOperand(sp, 4, NegPreIndex), al) + // ldr(rx, MemOperand(sp, 4, PostIndex), al) + // Both instructions can be eliminated if ry = rx. + // If ry != rx, a register copy from ry to rx is inserted + // after eliminating the push and the pop instructions. + Instr push_instr = instr_at(pc_ - 2 * kInstrSize); + Instr pop_instr = instr_at(pc_ - 1 * kInstrSize); + + if (can_peephole_optimize(2) && + IsPush(push_instr) && + IsPop(pop_instr)) { + if ((pop_instr & kRdMask) != (push_instr & kRdMask)) { + // For consecutive push and pop on different registers, + // we delete both the push & pop and insert a register move. 
+ // push ry, pop rx --> mov rx, ry + Register reg_pushed, reg_popped; + reg_pushed = GetRd(push_instr); + reg_popped = GetRd(pop_instr); + pc_ -= 2 * kInstrSize; + // Insert a mov instruction, which is better than a pair of push & pop + mov(reg_popped, reg_pushed); + if (FLAG_print_peephole_optimization) { + PrintF("%x push/pop (diff reg) replaced by a reg move\n", pc_offset()); + } + } else { + // For consecutive push and pop on the same register, + // both the push and the pop can be deleted. + pc_ -= 2 * kInstrSize; + if (FLAG_print_peephole_optimization) { + PrintF("%x push/pop (same reg) eliminated\n", pc_offset()); + } + } + } + + if (can_peephole_optimize(2)) { + Instr str_instr = instr_at(pc_ - 2 * kInstrSize); + Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize); + + if ((IsStrRegFpOffset(str_instr) && + IsLdrRegFpOffset(ldr_instr)) || + (IsStrRegFpNegOffset(str_instr) && + IsLdrRegFpNegOffset(ldr_instr))) { + if ((ldr_instr & kLdrStrInstrArgumentMask) == + (str_instr & kLdrStrInstrArgumentMask)) { + // Pattern: Ldr/str same fp+offset, same register. + // + // The following: + // str rx, [fp, #-12] + // ldr rx, [fp, #-12] + // + // Becomes: + // str rx, [fp, #-12] + + pc_ -= 1 * kInstrSize; + if (FLAG_print_peephole_optimization) { + PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset()); + } + } else if ((ldr_instr & kLdrStrOffsetMask) == + (str_instr & kLdrStrOffsetMask)) { + // Pattern: Ldr/str same fp+offset, different register. + // + // The following: + // str rx, [fp, #-12] + // ldr ry, [fp, #-12] + // + // Becomes: + // str rx, [fp, #-12] + // mov ry, rx + + Register reg_stored, reg_loaded; + reg_stored = GetRd(str_instr); + reg_loaded = GetRd(ldr_instr); + pc_ -= 1 * kInstrSize; + // Insert a mov instruction, which is better than ldr. + mov(reg_loaded, reg_stored); + if (FLAG_print_peephole_optimization) { + PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset()); + } + } + } + } + + if (can_peephole_optimize(3)) { + Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize); + Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize); + Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize); + if (IsPush(mem_write_instr) && + IsPop(mem_read_instr)) { + if ((IsLdrRegFpOffset(ldr_instr) || + IsLdrRegFpNegOffset(ldr_instr))) { + if ((mem_write_instr & kRdMask) == + (mem_read_instr & kRdMask)) { + // Pattern: push & pop from/to same register, + // with a fp+offset ldr in between + // + // The following: + // str rx, [sp, #-4]! + // ldr rz, [fp, #-24] + // ldr rx, [sp], #+4 + // + // Becomes: + // if(rx == rz) + // delete all + // else + // ldr rz, [fp, #-24] + + if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) { + pc_ -= 3 * kInstrSize; + } else { + pc_ -= 3 * kInstrSize; + // Reinsert back the ldr rz. + emit(ldr_instr); + } + if (FLAG_print_peephole_optimization) { + PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset()); + } + } else { + // Pattern: push & pop from/to different registers + // with a fp+offset ldr in between + // + // The following: + // str rx, [sp, #-4]! 
+ // ldr rz, [fp, #-24] + // ldr ry, [sp], #+4 + // + // Becomes: + // if(ry == rz) + // mov ry, rx; + // else if(rx != rz) + // ldr rz, [fp, #-24] + // mov ry, rx + // else if((ry != rz) || (rx == rz)) becomes: + // mov ry, rx + // ldr rz, [fp, #-24] + + Register reg_pushed, reg_popped; + if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) { + reg_pushed = GetRd(mem_write_instr); + reg_popped = GetRd(mem_read_instr); + pc_ -= 3 * kInstrSize; + mov(reg_popped, reg_pushed); + } else if ((mem_write_instr & kRdMask) + != (ldr_instr & kRdMask)) { + reg_pushed = GetRd(mem_write_instr); + reg_popped = GetRd(mem_read_instr); + pc_ -= 3 * kInstrSize; + emit(ldr_instr); + mov(reg_popped, reg_pushed); + } else if (((mem_read_instr & kRdMask) + != (ldr_instr & kRdMask)) || + ((mem_write_instr & kRdMask) + == (ldr_instr & kRdMask)) ) { + reg_pushed = GetRd(mem_write_instr); + reg_popped = GetRd(mem_read_instr); + pc_ -= 3 * kInstrSize; + mov(reg_popped, reg_pushed); + emit(ldr_instr); + } + if (FLAG_print_peephole_optimization) { + PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset()); + } + } + } } } } @@ -1111,16 +1320,13 @@ void Assembler::str(Register src, const MemOperand& dst, Condition cond) { // Eliminate pattern: pop(), push(r) // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al // -> str r, [sp, 0], al - int pattern_size = 2 * kInstrSize; - if (FLAG_push_pop_elimination && - last_bound_pos_ <= (pc_offset() - pattern_size) && - reloc_info_writer.last_pc() <= (pc_ - pattern_size) && + if (can_peephole_optimize(2) && // Pattern. instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) && instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) { pc_ -= 2 * kInstrSize; emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12); - if (FLAG_print_push_pop_elimination) { + if (FLAG_print_peephole_optimization) { PrintF("%x pop()/push(reg) eliminated\n", pc_offset()); } } @@ -1162,12 +1368,18 @@ void Assembler::ldrd(Register dst, const MemOperand& src, Condition cond) { #ifdef CAN_USE_ARMV7_INSTRUCTIONS addrmod3(cond | B7 | B6 | B4, dst, src); #else - ldr(dst, src, cond); + // Generate two ldr instructions if ldrd is not available. MemOperand src1(src); src1.set_offset(src1.offset() + 4); Register dst1(dst); - dst1.code_ = dst1.code_ + 1; - ldr(dst1, src1, cond); + dst1.set_code(dst1.code() + 1); + if (dst.is(src.rn())) { + ldr(dst1, src1, cond); + ldr(dst, src, cond); + } else { + ldr(dst, src, cond); + ldr(dst1, src1, cond); + } #endif } @@ -1177,11 +1389,12 @@ void Assembler::strd(Register src, const MemOperand& dst, Condition cond) { #ifdef CAN_USE_ARMV7_INSTRUCTIONS addrmod3(cond | B7 | B6 | B5 | B4, src, dst); #else - str(src, dst, cond); + // Generate two str instructions if strd is not available. MemOperand dst1(dst); dst1.set_offset(dst1.offset() + 4); Register src1(src); - src1.code_ = src1.code_ + 1; + src1.set_code(src1.code() + 1); + str(src, dst, cond); str(src1, dst1, cond); #endif } @@ -1216,26 +1429,6 @@ void Assembler::stm(BlockAddrMode am, } -// Semaphore instructions. 
-void Assembler::swp(Register dst, Register src, Register base, Condition cond) { - ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc)); - ASSERT(!dst.is(base) && !src.is(base)); - emit(cond | P | base.code()*B16 | dst.code()*B12 | - B7 | B4 | src.code()); -} - - -void Assembler::swpb(Register dst, - Register src, - Register base, - Condition cond) { - ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc)); - ASSERT(!dst.is(base) && !src.is(base)); - emit(cond | P | B | base.code()*B16 | dst.code()*B12 | - B7 | B4 | src.code()); -} - - // Exception-generating instructions and debugging support. void Assembler::stop(const char* msg) { #ifndef __arm__ @@ -1779,34 +1972,6 @@ void Assembler::nop(int type) { } -void Assembler::lea(Register dst, - const MemOperand& x, - SBit s, - Condition cond) { - int am = x.am_; - if (!x.rm_.is_valid()) { - // Immediate offset. - if ((am & P) == 0) // post indexing - mov(dst, Operand(x.rn_), s, cond); - else if ((am & U) == 0) // negative indexing - sub(dst, x.rn_, Operand(x.offset_), s, cond); - else - add(dst, x.rn_, Operand(x.offset_), s, cond); - } else { - // Register offset (shift_imm_ and shift_op_ are 0) or scaled - // register offset the constructors make sure than both shift_imm_ - // and shift_op_ are initialized. - ASSERT(!x.rm_.is(pc)); - if ((am & P) == 0) // post indexing - mov(dst, Operand(x.rn_), s, cond); - else if ((am & U) == 0) // negative indexing - sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond); - else - add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond); - } -} - - bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { uint32_t dummy1; uint32_t dummy2; @@ -2062,3 +2227,5 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 61b84d434f..5a5d64b7b6 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -80,6 +80,11 @@ struct Register { return 1 << code_; } + void set_code(int code) { + code_ = code; + ASSERT(is_valid()); + } + // Unfortunately we can't make this private in a struct. int code_; }; @@ -458,7 +463,8 @@ class MemOperand BASE_EMBEDDED { return offset_; } - Register rm() const {return rm_;} + Register rn() const { return rn_; } + Register rm() const { return rm_; } private: Register rn_; // base @@ -774,10 +780,6 @@ class Assembler : public Malloced { void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al); void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al); - // Semaphore instructions - void swp(Register dst, Register src, Register base, Condition cond = al); - void swpb(Register dst, Register src, Register base, Condition cond = al); - // Exception-generating instructions and debugging support void stop(const char* msg); @@ -924,10 +926,6 @@ class Assembler : public Malloced { add(sp, sp, Operand(kPointerSize)); } - // Load effective address of memory operand x into register dst - void lea(Register dst, const MemOperand& x, - SBit s = LeaveCC, Condition cond = al); - // Jump unconditionally to given label. 
void jmp(Label* L) { b(L, al); } @@ -976,6 +974,12 @@ class Assembler : public Malloced { int current_position() const { return current_position_; } int current_statement_position() const { return current_statement_position_; } + bool can_peephole_optimize(int instructions) { + if (!FLAG_peephole_optimization) return false; + if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false; + return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize; + } + // Read/patch instructions static Instr instr_at(byte* pc) { return *reinterpret_cast(pc); } static void instr_at_put(byte* pc, Instr instr) { @@ -987,6 +991,13 @@ class Assembler : public Malloced { static bool IsLdrRegisterImmediate(Instr instr); static int GetLdrRegisterImmediateOffset(Instr instr); static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset); + static Register GetRd(Instr instr); + static bool IsPush(Instr instr); + static bool IsPop(Instr instr); + static bool IsStrRegFpOffset(Instr instr); + static bool IsLdrRegFpOffset(Instr instr); + static bool IsStrRegFpNegOffset(Instr instr); + static bool IsLdrRegFpNegOffset(Instr instr); protected: diff --git a/deps/v8/src/arm/assembler-thumb2-inl.h b/deps/v8/src/arm/assembler-thumb2-inl.h deleted file mode 100644 index 9e0fc2f731..0000000000 --- a/deps/v8/src/arm/assembler-thumb2-inl.h +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright (c) 1994-2006 Sun Microsystems Inc. -// All Rights Reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// -// - Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// - Redistribution in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the -// distribution. -// -// - Neither the name of Sun Microsystems or the names of contributors may -// be used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED -// OF THE POSSIBILITY OF SUCH DAMAGE. - -// The original source code covered by the above license above has been modified -// significantly by Google Inc. -// Copyright 2006-2008 the V8 project authors. All rights reserved. 
- -#ifndef V8_ARM_ASSEMBLER_THUMB2_INL_H_ -#define V8_ARM_ASSEMBLER_THUMB2_INL_H_ - -#include "arm/assembler-thumb2.h" -#include "cpu.h" - - -namespace v8 { -namespace internal { - -Condition NegateCondition(Condition cc) { - ASSERT(cc != al); - return static_cast(cc ^ ne); -} - - -void RelocInfo::apply(intptr_t delta) { - if (RelocInfo::IsInternalReference(rmode_)) { - // absolute code pointer inside code object moves with the code object. - int32_t* p = reinterpret_cast(pc_); - *p += delta; // relocate entry - } - // We do not use pc relative addressing on ARM, so there is - // nothing else to do. -} - - -Address RelocInfo::target_address() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); - return Assembler::target_address_at(pc_); -} - - -Address RelocInfo::target_address_address() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); - return reinterpret_cast
    (Assembler::target_address_address_at(pc_)); -} - - -void RelocInfo::set_target_address(Address target) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); - Assembler::set_target_address_at(pc_, target); -} - - -Object* RelocInfo::target_object() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - return Memory::Object_at(Assembler::target_address_address_at(pc_)); -} - - -Handle RelocInfo::target_object_handle(Assembler* origin) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_)); -} - - -Object** RelocInfo::target_object_address() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - return reinterpret_cast(Assembler::target_address_address_at(pc_)); -} - - -void RelocInfo::set_target_object(Object* target) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - Assembler::set_target_address_at(pc_, reinterpret_cast
    (target)); -} - - -Address* RelocInfo::target_reference_address() { - ASSERT(rmode_ == EXTERNAL_REFERENCE); - return reinterpret_cast(Assembler::target_address_address_at(pc_)); -} - - -Address RelocInfo::call_address() { - ASSERT(IsPatchedReturnSequence()); - // The 2 instructions offset assumes patched return sequence. - ASSERT(IsJSReturn(rmode())); - return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize); -} - - -void RelocInfo::set_call_address(Address target) { - ASSERT(IsPatchedReturnSequence()); - // The 2 instructions offset assumes patched return sequence. - ASSERT(IsJSReturn(rmode())); - Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target; -} - - -Object* RelocInfo::call_object() { - return *call_object_address(); -} - - -Object** RelocInfo::call_object_address() { - ASSERT(IsPatchedReturnSequence()); - // The 2 instructions offset assumes patched return sequence. - ASSERT(IsJSReturn(rmode())); - return reinterpret_cast(pc_ + 2 * Assembler::kInstrSize); -} - - -void RelocInfo::set_call_object(Object* target) { - *call_object_address() = target; -} - - -bool RelocInfo::IsPatchedReturnSequence() { - // On ARM a "call instruction" is actually two instructions. - // mov lr, pc - // ldr pc, [pc, #XXX] - return (Assembler::instr_at(pc_) == kMovLrPc) - && ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern) - == kLdrPCPattern); -} - - -Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) { - rm_ = no_reg; - imm32_ = immediate; - rmode_ = rmode; -} - - -Operand::Operand(const char* s) { - rm_ = no_reg; - imm32_ = reinterpret_cast(s); - rmode_ = RelocInfo::EMBEDDED_STRING; -} - - -Operand::Operand(const ExternalReference& f) { - rm_ = no_reg; - imm32_ = reinterpret_cast(f.address()); - rmode_ = RelocInfo::EXTERNAL_REFERENCE; -} - - -Operand::Operand(Smi* value) { - rm_ = no_reg; - imm32_ = reinterpret_cast(value); - rmode_ = RelocInfo::NONE; -} - - -Operand::Operand(Register rm) { - rm_ = rm; - rs_ = no_reg; - shift_op_ = LSL; - shift_imm_ = 0; -} - - -bool Operand::is_reg() const { - return rm_.is_valid() && - rs_.is(no_reg) && - shift_op_ == LSL && - shift_imm_ == 0; -} - - -void Assembler::CheckBuffer() { - if (buffer_space() <= kGap) { - GrowBuffer(); - } - if (pc_offset() >= next_buffer_check_) { - CheckConstPool(false, true); - } -} - - -void Assembler::emit(Instr x) { - CheckBuffer(); - *reinterpret_cast(pc_) = x; - pc_ += kInstrSize; -} - - -Address Assembler::target_address_address_at(Address pc) { - Address target_pc = pc; - Instr instr = Memory::int32_at(target_pc); - // If we have a bx instruction, the instruction before the bx is - // what we need to patch. - static const int32_t kBxInstMask = 0x0ffffff0; - static const int32_t kBxInstPattern = 0x012fff10; - if ((instr & kBxInstMask) == kBxInstPattern) { - target_pc -= kInstrSize; - instr = Memory::int32_at(target_pc); - } - // Verify that the instruction to patch is a - // ldr , [pc +/- offset_12]. - ASSERT((instr & 0x0f7f0000) == 0x051f0000); - int offset = instr & 0xfff; // offset_12 is unsigned - if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign - // Verify that the constant pool comes after the instruction referencing it. 
- ASSERT(offset >= -4); - return target_pc + offset + 8; -} - - -Address Assembler::target_address_at(Address pc) { - return Memory::Address_at(target_address_address_at(pc)); -} - - -void Assembler::set_target_at(Address constant_pool_entry, - Address target) { - Memory::Address_at(constant_pool_entry) = target; -} - - -void Assembler::set_target_address_at(Address pc, Address target) { - Memory::Address_at(target_address_address_at(pc)) = target; - // Intuitively, we would think it is necessary to flush the instruction cache - // after patching a target address in the code as follows: - // CPU::FlushICache(pc, sizeof(target)); - // However, on ARM, no instruction was actually patched by the assignment - // above; the target address is not part of an instruction, it is patched in - // the constant pool and is read via a data access; the instruction accessing - // this address in the constant pool remains unchanged. -} - -} } // namespace v8::internal - -#endif // V8_ARM_ASSEMBLER_THUMB2_INL_H_ diff --git a/deps/v8/src/arm/assembler-thumb2.cc b/deps/v8/src/arm/assembler-thumb2.cc deleted file mode 100644 index e31c429175..0000000000 --- a/deps/v8/src/arm/assembler-thumb2.cc +++ /dev/null @@ -1,1878 +0,0 @@ -// Copyright (c) 1994-2006 Sun Microsystems Inc. -// All Rights Reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// -// - Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// - Redistribution in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the -// distribution. -// -// - Neither the name of Sun Microsystems or the names of contributors may -// be used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED -// OF THE POSSIBILITY OF SUCH DAMAGE. - -// The original source code covered by the above license above has been -// modified significantly by Google Inc. -// Copyright 2010 the V8 project authors. All rights reserved. - -#include "v8.h" - -#include "arm/assembler-thumb2-inl.h" -#include "serialize.h" - -namespace v8 { -namespace internal { - -// Safe default is no features. -unsigned CpuFeatures::supported_ = 0; -unsigned CpuFeatures::enabled_ = 0; -unsigned CpuFeatures::found_by_runtime_probing_ = 0; - -void CpuFeatures::Probe() { - // If the compiler is allowed to use vfp then we can use vfp too in our - // code generation. -#if !defined(__arm__) - // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled. 
- if (FLAG_enable_vfp3) { - supported_ |= 1u << VFP3; - } - // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled - if (FLAG_enable_armv7) { - supported_ |= 1u << ARMv7; - } -#else - if (Serializer::enabled()) { - supported_ |= OS::CpuFeaturesImpliedByPlatform(); - return; // No features if we might serialize. - } - - if (OS::ArmCpuHasFeature(VFP3)) { - // This implementation also sets the VFP flags if - // runtime detection of VFP returns true. - supported_ |= 1u << VFP3; - found_by_runtime_probing_ |= 1u << VFP3; - } - - if (OS::ArmCpuHasFeature(ARMv7)) { - supported_ |= 1u << ARMv7; - found_by_runtime_probing_ |= 1u << ARMv7; - } -#endif -} - - -// ----------------------------------------------------------------------------- -// Implementation of Register and CRegister - -Register no_reg = { -1 }; - -Register r0 = { 0 }; -Register r1 = { 1 }; -Register r2 = { 2 }; -Register r3 = { 3 }; -Register r4 = { 4 }; -Register r5 = { 5 }; -Register r6 = { 6 }; -Register r7 = { 7 }; -Register r8 = { 8 }; // Used as context register. -Register r9 = { 9 }; -Register r10 = { 10 }; // Used as roots register. -Register fp = { 11 }; -Register ip = { 12 }; -Register sp = { 13 }; -Register lr = { 14 }; -Register pc = { 15 }; - - -CRegister no_creg = { -1 }; - -CRegister cr0 = { 0 }; -CRegister cr1 = { 1 }; -CRegister cr2 = { 2 }; -CRegister cr3 = { 3 }; -CRegister cr4 = { 4 }; -CRegister cr5 = { 5 }; -CRegister cr6 = { 6 }; -CRegister cr7 = { 7 }; -CRegister cr8 = { 8 }; -CRegister cr9 = { 9 }; -CRegister cr10 = { 10 }; -CRegister cr11 = { 11 }; -CRegister cr12 = { 12 }; -CRegister cr13 = { 13 }; -CRegister cr14 = { 14 }; -CRegister cr15 = { 15 }; - -// Support for the VFP registers s0 to s31 (d0 to d15). -// Note that "sN:sM" is the same as "dN/2". -SwVfpRegister s0 = { 0 }; -SwVfpRegister s1 = { 1 }; -SwVfpRegister s2 = { 2 }; -SwVfpRegister s3 = { 3 }; -SwVfpRegister s4 = { 4 }; -SwVfpRegister s5 = { 5 }; -SwVfpRegister s6 = { 6 }; -SwVfpRegister s7 = { 7 }; -SwVfpRegister s8 = { 8 }; -SwVfpRegister s9 = { 9 }; -SwVfpRegister s10 = { 10 }; -SwVfpRegister s11 = { 11 }; -SwVfpRegister s12 = { 12 }; -SwVfpRegister s13 = { 13 }; -SwVfpRegister s14 = { 14 }; -SwVfpRegister s15 = { 15 }; -SwVfpRegister s16 = { 16 }; -SwVfpRegister s17 = { 17 }; -SwVfpRegister s18 = { 18 }; -SwVfpRegister s19 = { 19 }; -SwVfpRegister s20 = { 20 }; -SwVfpRegister s21 = { 21 }; -SwVfpRegister s22 = { 22 }; -SwVfpRegister s23 = { 23 }; -SwVfpRegister s24 = { 24 }; -SwVfpRegister s25 = { 25 }; -SwVfpRegister s26 = { 26 }; -SwVfpRegister s27 = { 27 }; -SwVfpRegister s28 = { 28 }; -SwVfpRegister s29 = { 29 }; -SwVfpRegister s30 = { 30 }; -SwVfpRegister s31 = { 31 }; - -DwVfpRegister d0 = { 0 }; -DwVfpRegister d1 = { 1 }; -DwVfpRegister d2 = { 2 }; -DwVfpRegister d3 = { 3 }; -DwVfpRegister d4 = { 4 }; -DwVfpRegister d5 = { 5 }; -DwVfpRegister d6 = { 6 }; -DwVfpRegister d7 = { 7 }; -DwVfpRegister d8 = { 8 }; -DwVfpRegister d9 = { 9 }; -DwVfpRegister d10 = { 10 }; -DwVfpRegister d11 = { 11 }; -DwVfpRegister d12 = { 12 }; -DwVfpRegister d13 = { 13 }; -DwVfpRegister d14 = { 14 }; -DwVfpRegister d15 = { 15 }; - -// ----------------------------------------------------------------------------- -// Implementation of RelocInfo - -const int RelocInfo::kApplyMask = 0; - - -void RelocInfo::PatchCode(byte* instructions, int instruction_count) { - // Patch the code at the current address with the supplied instructions. 
- Instr* pc = reinterpret_cast(pc_); - Instr* instr = reinterpret_cast(instructions); - for (int i = 0; i < instruction_count; i++) { - *(pc + i) = *(instr + i); - } - - // Indicate that code has changed. - CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize); -} - - -// Patch the code at the current PC with a call to the target address. -// Additional guard instructions can be added if required. -void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { - // Patch the code at the current address with a call to the target. - UNIMPLEMENTED(); -} - - -// ----------------------------------------------------------------------------- -// Implementation of Operand and MemOperand -// See assembler-thumb2-inl.h for inlined constructors - -Operand::Operand(Handle handle) { - rm_ = no_reg; - // Verify all Objects referred by code are NOT in new space. - Object* obj = *handle; - ASSERT(!Heap::InNewSpace(obj)); - if (obj->IsHeapObject()) { - imm32_ = reinterpret_cast(handle.location()); - rmode_ = RelocInfo::EMBEDDED_OBJECT; - } else { - // no relocation needed - imm32_ = reinterpret_cast(obj); - rmode_ = RelocInfo::NONE; - } -} - - -Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) { - ASSERT(is_uint5(shift_imm)); - ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it - rm_ = rm; - rs_ = no_reg; - shift_op_ = shift_op; - shift_imm_ = shift_imm & 31; - if (shift_op == RRX) { - // encoded as ROR with shift_imm == 0 - ASSERT(shift_imm == 0); - shift_op_ = ROR; - shift_imm_ = 0; - } -} - - -Operand::Operand(Register rm, ShiftOp shift_op, Register rs) { - ASSERT(shift_op != RRX); - rm_ = rm; - rs_ = no_reg; - shift_op_ = shift_op; - rs_ = rs; -} - - -MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) { - rn_ = rn; - rm_ = no_reg; - offset_ = offset; - am_ = am; -} - -MemOperand::MemOperand(Register rn, Register rm, AddrMode am) { - rn_ = rn; - rm_ = rm; - shift_op_ = LSL; - shift_imm_ = 0; - am_ = am; -} - - -MemOperand::MemOperand(Register rn, Register rm, - ShiftOp shift_op, int shift_imm, AddrMode am) { - ASSERT(is_uint5(shift_imm)); - rn_ = rn; - rm_ = rm; - shift_op_ = shift_op; - shift_imm_ = shift_imm & 31; - am_ = am; -} - - -// ----------------------------------------------------------------------------- -// Implementation of Assembler. - -// Instruction encoding bits. -enum { - H = 1 << 5, // halfword (or byte) - S6 = 1 << 6, // signed (or unsigned) - L = 1 << 20, // load (or store) - S = 1 << 20, // set condition code (or leave unchanged) - W = 1 << 21, // writeback base register (or leave unchanged) - A = 1 << 21, // accumulate in multiply instruction (or not) - B = 1 << 22, // unsigned byte (or word) - N = 1 << 22, // long (or short) - U = 1 << 23, // positive (or negative) offset/index - P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing) - I = 1 << 25, // immediate shifter operand (or not) - - B4 = 1 << 4, - B5 = 1 << 5, - B6 = 1 << 6, - B7 = 1 << 7, - B8 = 1 << 8, - B9 = 1 << 9, - B12 = 1 << 12, - B16 = 1 << 16, - B18 = 1 << 18, - B19 = 1 << 19, - B20 = 1 << 20, - B21 = 1 << 21, - B22 = 1 << 22, - B23 = 1 << 23, - B24 = 1 << 24, - B25 = 1 << 25, - B26 = 1 << 26, - B27 = 1 << 27, - - // Instruction bit masks. - RdMask = 15 << 12, // in str instruction - CondMask = 15 << 28, - CoprocessorMask = 15 << 8, - OpCodeMask = 15 << 21, // in data-processing instructions - Imm24Mask = (1 << 24) - 1, - Off12Mask = (1 << 12) - 1, - // Reserved condition. 
- nv = 15 << 28 -}; - - -// add(sp, sp, 4) instruction (aka Pop()) -static const Instr kPopInstruction = - al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12; -// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r)) -// register r is not encoded. -static const Instr kPushRegPattern = - al | B26 | 4 | NegPreIndex | sp.code() * B16; -// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r)) -// register r is not encoded. -static const Instr kPopRegPattern = - al | B26 | L | 4 | PostIndex | sp.code() * B16; -// mov lr, pc -const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12; -// ldr pc, [pc, #XXX] -const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16; - -// Spare buffer. -static const int kMinimalBufferSize = 4*KB; -static byte* spare_buffer_ = NULL; - -Assembler::Assembler(void* buffer, int buffer_size) { - if (buffer == NULL) { - // Do our own buffer management. - if (buffer_size <= kMinimalBufferSize) { - buffer_size = kMinimalBufferSize; - - if (spare_buffer_ != NULL) { - buffer = spare_buffer_; - spare_buffer_ = NULL; - } - } - if (buffer == NULL) { - buffer_ = NewArray<byte>(buffer_size); - } else { - buffer_ = static_cast<byte*>(buffer); - } - buffer_size_ = buffer_size; - own_buffer_ = true; - - } else { - // Use externally provided buffer instead. - ASSERT(buffer_size > 0); - buffer_ = static_cast<byte*>(buffer); - buffer_size_ = buffer_size; - own_buffer_ = false; - } - - // Setup buffer pointers. - ASSERT(buffer_ != NULL); - pc_ = buffer_; - reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); - num_prinfo_ = 0; - next_buffer_check_ = 0; - no_const_pool_before_ = 0; - last_const_pool_end_ = 0; - last_bound_pos_ = 0; - current_statement_position_ = RelocInfo::kNoPosition; - current_position_ = RelocInfo::kNoPosition; - written_statement_position_ = current_statement_position_; - written_position_ = current_position_; -} - - -Assembler::~Assembler() { - if (own_buffer_) { - if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) { - spare_buffer_ = buffer_; - } else { - DeleteArray(buffer_); - } - } -} - - -void Assembler::GetCode(CodeDesc* desc) { - // Emit constant pool if necessary. - CheckConstPool(true, false); - ASSERT(num_prinfo_ == 0); - - // Setup code descriptor. - desc->buffer = buffer_; - desc->buffer_size = buffer_size_; - desc->instr_size = pc_offset(); - desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); -} - - -void Assembler::Align(int m) { - ASSERT(m >= 4 && IsPowerOf2(m)); - while ((pc_offset() & (m - 1)) != 0) { - nop(); - } -} - - -// Labels refer to positions in the (to be) generated code. -// There are bound, linked, and unused labels. -// -// Bound labels refer to known positions in the already -// generated code. pos() is the position the label refers to. -// -// Linked labels refer to unknown positions in the code -// to be generated; pos() is the position of the last -// instruction using the label. - - -// The link chain is terminated by a negative code position (must be aligned) -const int kEndOfChain = -4; - - -int Assembler::target_at(int pos) { - Instr instr = instr_at(pos); - if ((instr & ~Imm24Mask) == 0) { - // Emitted label constant, not part of a branch. 
- return instr - (Code::kHeaderSize - kHeapObjectTag); - } - ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 - int imm26 = ((instr & Imm24Mask) << 8) >> 6; - if ((instr & CondMask) == nv && (instr & B24) != 0) - // blx uses bit 24 to encode bit 2 of imm26 - imm26 += 2; - - return pos + kPcLoadDelta + imm26; -} - - -void Assembler::target_at_put(int pos, int target_pos) { - Instr instr = instr_at(pos); - if ((instr & ~Imm24Mask) == 0) { - ASSERT(target_pos == kEndOfChain || target_pos >= 0); - // Emitted label constant, not part of a branch. - // Make label relative to Code* of generated Code object. - instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); - return; - } - int imm26 = target_pos - (pos + kPcLoadDelta); - ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 - if ((instr & CondMask) == nv) { - // blx uses bit 24 to encode bit 2 of imm26 - ASSERT((imm26 & 1) == 0); - instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24; - } else { - ASSERT((imm26 & 3) == 0); - instr &= ~Imm24Mask; - } - int imm24 = imm26 >> 2; - ASSERT(is_int24(imm24)); - instr_at_put(pos, instr | (imm24 & Imm24Mask)); -} - - -void Assembler::print(Label* L) { - if (L->is_unused()) { - PrintF("unused label\n"); - } else if (L->is_bound()) { - PrintF("bound label to %d\n", L->pos()); - } else if (L->is_linked()) { - Label l = *L; - PrintF("unbound label"); - while (l.is_linked()) { - PrintF("@ %d ", l.pos()); - Instr instr = instr_at(l.pos()); - if ((instr & ~Imm24Mask) == 0) { - PrintF("value\n"); - } else { - ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx - int cond = instr & CondMask; - const char* b; - const char* c; - if (cond == nv) { - b = "blx"; - c = ""; - } else { - if ((instr & B24) != 0) - b = "bl"; - else - b = "b"; - - switch (cond) { - case eq: c = "eq"; break; - case ne: c = "ne"; break; - case hs: c = "hs"; break; - case lo: c = "lo"; break; - case mi: c = "mi"; break; - case pl: c = "pl"; break; - case vs: c = "vs"; break; - case vc: c = "vc"; break; - case hi: c = "hi"; break; - case ls: c = "ls"; break; - case ge: c = "ge"; break; - case lt: c = "lt"; break; - case gt: c = "gt"; break; - case le: c = "le"; break; - case al: c = ""; break; - default: - c = ""; - UNREACHABLE(); - } - } - PrintF("%s%s\n", b, c); - } - next(&l); - } - } else { - PrintF("label in inconsistent state (pos = %d)\n", L->pos_); - } -} - - -void Assembler::bind_to(Label* L, int pos) { - ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position - while (L->is_linked()) { - int fixup_pos = L->pos(); - next(L); // call next before overwriting link with target at fixup_pos - target_at_put(fixup_pos, pos); - } - L->bind_to(pos); - - // Keep track of the last bound label so we don't eliminate any instructions - // before a bound label. - if (pos > last_bound_pos_) - last_bound_pos_ = pos; -} - - -void Assembler::link_to(Label* L, Label* appendix) { - if (appendix->is_linked()) { - if (L->is_linked()) { - // Append appendix to L's list. - int fixup_pos; - int link = L->pos(); - do { - fixup_pos = link; - link = target_at(fixup_pos); - } while (link > 0); - ASSERT(link == kEndOfChain); - target_at_put(fixup_pos, appendix->pos()); - } else { - // L is empty, simply use appendix. 
- *L = *appendix; - } - } - appendix->Unuse(); // appendix should not be used anymore -} - - -void Assembler::bind(Label* L) { - ASSERT(!L->is_bound()); // label can only be bound once - bind_to(L, pc_offset()); -} - - -void Assembler::next(Label* L) { - ASSERT(L->is_linked()); - int link = target_at(L->pos()); - if (link > 0) { - L->link_to(link); - } else { - ASSERT(link == kEndOfChain); - L->Unuse(); - } -} - - -// Low-level code emission routines depending on the addressing mode. -static bool fits_shifter(uint32_t imm32, - uint32_t* rotate_imm, - uint32_t* immed_8, - Instr* instr) { - // imm32 must be unsigned. - for (int rot = 0; rot < 16; rot++) { - uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot)); - if ((imm8 <= 0xff)) { - *rotate_imm = rot; - *immed_8 = imm8; - return true; - } - } - // If the opcode is mov or mvn and if ~imm32 fits, change the opcode. - if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) { - if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { - *instr ^= 0x2*B21; - return true; - } - } - return false; -} - - -// We have to use the temporary register for things that can be relocated even -// if they can be encoded in the ARM's 12 bits of immediate-offset instruction -// space. There is no guarantee that the relocated location can be similarly -// encoded. -static bool MustUseIp(RelocInfo::Mode rmode) { - if (rmode == RelocInfo::EXTERNAL_REFERENCE) { -#ifdef DEBUG - if (!Serializer::enabled()) { - Serializer::TooLateToEnableNow(); - } -#endif - return Serializer::enabled(); - } else if (rmode == RelocInfo::NONE) { - return false; - } - return true; -} - - -void Assembler::addrmod1(Instr instr, - Register rn, - Register rd, - const Operand& x) { - CheckBuffer(); - ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0); - if (!x.rm_.is_valid()) { - // Immediate. - uint32_t rotate_imm; - uint32_t immed_8; - if (MustUseIp(x.rmode_) || - !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { - // The immediate operand cannot be encoded as a shifter operand, so load - // it first to register ip and change the original instruction to use ip. - // However, if the original instruction is a 'mov rd, x' (not setting the - // condition code), then replace it with a 'ldr rd, [pc]'. - RecordRelocInfo(x.rmode_, x.imm32_); - CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed - Condition cond = static_cast(instr & CondMask); - if ((instr & ~CondMask) == 13*B21) { // mov, S not set - ldr(rd, MemOperand(pc, 0), cond); - } else { - ldr(ip, MemOperand(pc, 0), cond); - addrmod1(instr, rn, rd, Operand(ip)); - } - return; - } - instr |= I | rotate_imm*B8 | immed_8; - } else if (!x.rs_.is_valid()) { - // Immediate shift. - instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); - } else { - // Register shift. - ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc)); - instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code(); - } - emit(instr | rn.code()*B16 | rd.code()*B12); - if (rn.is(pc) || x.rm_.is(pc)) - // Block constant pool emission for one instruction after reading pc. - BlockConstPoolBefore(pc_offset() + kInstrSize); -} - - -void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) { - ASSERT((instr & ~(CondMask | B | L)) == B26); - int am = x.am_; - if (!x.rm_.is_valid()) { - // Immediate offset. 
- int offset_12 = x.offset_; - if (offset_12 < 0) { - offset_12 = -offset_12; - am ^= U; - } - if (!is_uint12(offset_12)) { - // Immediate offset cannot be encoded, load it first to register ip - // rn (and rd in a load) should never be ip, or will be trashed. - ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); - mov(ip, Operand(x.offset_), LeaveCC, - static_cast(instr & CondMask)); - addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_)); - return; - } - ASSERT(offset_12 >= 0); // no masking needed - instr |= offset_12; - } else { - // Register offset (shift_imm_ and shift_op_ are 0) or scaled - // register offset the constructors make sure than both shift_imm_ - // and shift_op_ are initialized. - ASSERT(!x.rm_.is(pc)); - instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); - } - ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback - emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); -} - - -void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) { - ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7)); - ASSERT(x.rn_.is_valid()); - int am = x.am_; - if (!x.rm_.is_valid()) { - // Immediate offset. - int offset_8 = x.offset_; - if (offset_8 < 0) { - offset_8 = -offset_8; - am ^= U; - } - if (!is_uint8(offset_8)) { - // Immediate offset cannot be encoded, load it first to register ip - // rn (and rd in a load) should never be ip, or will be trashed. - ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); - mov(ip, Operand(x.offset_), LeaveCC, - static_cast(instr & CondMask)); - addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); - return; - } - ASSERT(offset_8 >= 0); // no masking needed - instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf); - } else if (x.shift_imm_ != 0) { - // Scaled register offset not supported, load index first - // rn (and rd in a load) should never be ip, or will be trashed. - ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); - mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC, - static_cast(instr & CondMask)); - addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); - return; - } else { - // Register offset. - ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback - instr |= x.rm_.code(); - } - ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback - emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); -} - - -void Assembler::addrmod4(Instr instr, Register rn, RegList rl) { - ASSERT((instr & ~(CondMask | P | U | W | L)) == B27); - ASSERT(rl != 0); - ASSERT(!rn.is(pc)); - emit(instr | rn.code()*B16 | rl); -} - - -void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) { - // Unindexed addressing is not encoded by this function. - ASSERT_EQ((B27 | B26), - (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L))); - ASSERT(x.rn_.is_valid() && !x.rm_.is_valid()); - int am = x.am_; - int offset_8 = x.offset_; - ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset - offset_8 >>= 2; - if (offset_8 < 0) { - offset_8 = -offset_8; - am ^= U; - } - ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte - ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback - - // Post-indexed addressing requires W == 1; different than in addrmod2/3. 
- if ((am & P) == 0) - am |= W; - - ASSERT(offset_8 >= 0); // no masking needed - emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8); -} - - -int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { - int target_pos; - if (L->is_bound()) { - target_pos = L->pos(); - } else { - if (L->is_linked()) { - target_pos = L->pos(); // L's link - } else { - target_pos = kEndOfChain; - } - L->link_to(pc_offset()); - } - - // Block the emission of the constant pool, since the branch instruction must - // be emitted at the pc offset recorded by the label. - BlockConstPoolBefore(pc_offset() + kInstrSize); - return target_pos - (pc_offset() + kPcLoadDelta); -} - - -void Assembler::label_at_put(Label* L, int at_offset) { - int target_pos; - if (L->is_bound()) { - target_pos = L->pos(); - } else { - if (L->is_linked()) { - target_pos = L->pos(); // L's link - } else { - target_pos = kEndOfChain; - } - L->link_to(at_offset); - instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); - } -} - - -// Branch instructions. -void Assembler::b(int branch_offset, Condition cond) { - ASSERT((branch_offset & 3) == 0); - int imm24 = branch_offset >> 2; - ASSERT(is_int24(imm24)); - emit(cond | B27 | B25 | (imm24 & Imm24Mask)); - - if (cond == al) - // Dead code is a good location to emit the constant pool. - CheckConstPool(false, false); -} - - -void Assembler::bl(int branch_offset, Condition cond) { - ASSERT((branch_offset & 3) == 0); - int imm24 = branch_offset >> 2; - ASSERT(is_int24(imm24)); - emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask)); -} - - -void Assembler::blx(int branch_offset) { // v5 and above - WriteRecordedPositions(); - ASSERT((branch_offset & 1) == 0); - int h = ((branch_offset & 2) >> 1)*B24; - int imm24 = branch_offset >> 2; - ASSERT(is_int24(imm24)); - emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask)); -} - - -void Assembler::blx(Register target, Condition cond) { // v5 and above - WriteRecordedPositions(); - ASSERT(!target.is(pc)); - emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code()); -} - - -void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t - WriteRecordedPositions(); - ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged - emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code()); -} - - -// Data-processing instructions. - -// UBFX ,,#,# -// Instruction details available in ARM DDI 0406A, A8-464. 
-// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) | -// Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0) -void Assembler::ubfx(Register dst, Register src1, const Operand& src2, - const Operand& src3, Condition cond) { - ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid()); - ASSERT(static_cast(src2.imm32_) <= 0x1f); - ASSERT(static_cast(src3.imm32_) <= 0x1f); - emit(cond | 0x3F*B21 | src3.imm32_*B16 | - dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code()); -} - - -void Assembler::and_(Register dst, Register src1, const Operand& src2, - SBit s, Condition cond) { - addrmod1(cond | 0*B21 | s, src1, dst, src2); -} - - -void Assembler::eor(Register dst, Register src1, const Operand& src2, - SBit s, Condition cond) { - addrmod1(cond | 1*B21 | s, src1, dst, src2); -} - - -void Assembler::sub(Register dst, Register src1, const Operand& src2, - SBit s, Condition cond) { - addrmod1(cond | 2*B21 | s, src1, dst, src2); -} - - -void Assembler::rsb(Register dst, Register src1, const Operand& src2, - SBit s, Condition cond) { - addrmod1(cond | 3*B21 | s, src1, dst, src2); -} - - -void Assembler::add(Register dst, Register src1, const Operand& src2, - SBit s, Condition cond) { - addrmod1(cond | 4*B21 | s, src1, dst, src2); - - // Eliminate pattern: push(r), pop() - // str(src, MemOperand(sp, 4, NegPreIndex), al); - // add(sp, sp, Operand(kPointerSize)); - // Both instructions can be eliminated. - int pattern_size = 2 * kInstrSize; - if (FLAG_push_pop_elimination && - last_bound_pos_ <= (pc_offset() - pattern_size) && - reloc_info_writer.last_pc() <= (pc_ - pattern_size) && - // Pattern. - instr_at(pc_ - 1 * kInstrSize) == kPopInstruction && - (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) { - pc_ -= 2 * kInstrSize; - if (FLAG_print_push_pop_elimination) { - PrintF("%x push(reg)/pop() eliminated\n", pc_offset()); - } - } -} - - -void Assembler::adc(Register dst, Register src1, const Operand& src2, - SBit s, Condition cond) { - addrmod1(cond | 5*B21 | s, src1, dst, src2); -} - - -void Assembler::sbc(Register dst, Register src1, const Operand& src2, - SBit s, Condition cond) { - addrmod1(cond | 6*B21 | s, src1, dst, src2); -} - - -void Assembler::rsc(Register dst, Register src1, const Operand& src2, - SBit s, Condition cond) { - addrmod1(cond | 7*B21 | s, src1, dst, src2); -} - - -void Assembler::tst(Register src1, const Operand& src2, Condition cond) { - addrmod1(cond | 8*B21 | S, src1, r0, src2); -} - - -void Assembler::teq(Register src1, const Operand& src2, Condition cond) { - addrmod1(cond | 9*B21 | S, src1, r0, src2); -} - - -void Assembler::cmp(Register src1, const Operand& src2, Condition cond) { - addrmod1(cond | 10*B21 | S, src1, r0, src2); -} - - -void Assembler::cmn(Register src1, const Operand& src2, Condition cond) { - addrmod1(cond | 11*B21 | S, src1, r0, src2); -} - - -void Assembler::orr(Register dst, Register src1, const Operand& src2, - SBit s, Condition cond) { - addrmod1(cond | 12*B21 | s, src1, dst, src2); -} - - -void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) { - if (dst.is(pc)) { - WriteRecordedPositions(); - } - addrmod1(cond | 13*B21 | s, r0, dst, src); -} - - -void Assembler::bic(Register dst, Register src1, const Operand& src2, - SBit s, Condition cond) { - addrmod1(cond | 14*B21 | s, src1, dst, src2); -} - - -void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) { - addrmod1(cond | 15*B21 | s, r0, dst, src); -} - - -// Multiply instructions. 
-void Assembler::mla(Register dst, Register src1, Register src2, Register srcA, - SBit s, Condition cond) { - ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); - emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 | - src2.code()*B8 | B7 | B4 | src1.code()); -} - - -void Assembler::mul(Register dst, Register src1, Register src2, - SBit s, Condition cond) { - ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); - // dst goes in bits 16-19 for this instruction! - emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code()); -} - - -void Assembler::smlal(Register dstL, - Register dstH, - Register src1, - Register src2, - SBit s, - Condition cond) { - ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); - ASSERT(!dstL.is(dstH)); - emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 | - src2.code()*B8 | B7 | B4 | src1.code()); -} - - -void Assembler::smull(Register dstL, - Register dstH, - Register src1, - Register src2, - SBit s, - Condition cond) { - ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); - ASSERT(!dstL.is(dstH)); - emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 | - src2.code()*B8 | B7 | B4 | src1.code()); -} - - -void Assembler::umlal(Register dstL, - Register dstH, - Register src1, - Register src2, - SBit s, - Condition cond) { - ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); - ASSERT(!dstL.is(dstH)); - emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 | - src2.code()*B8 | B7 | B4 | src1.code()); -} - - -void Assembler::umull(Register dstL, - Register dstH, - Register src1, - Register src2, - SBit s, - Condition cond) { - ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); - ASSERT(!dstL.is(dstH)); - emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 | - src2.code()*B8 | B7 | B4 | src1.code()); -} - - -// Miscellaneous arithmetic instructions. -void Assembler::clz(Register dst, Register src, Condition cond) { - // v5 and above. - ASSERT(!dst.is(pc) && !src.is(pc)); - emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 | - 15*B8 | B4 | src.code()); -} - - -// Status register access instructions. -void Assembler::mrs(Register dst, SRegister s, Condition cond) { - ASSERT(!dst.is(pc)); - emit(cond | B24 | s | 15*B16 | dst.code()*B12); -} - - -void Assembler::msr(SRegisterFieldMask fields, const Operand& src, - Condition cond) { - ASSERT(fields >= B16 && fields < B20); // at least one field set - Instr instr; - if (!src.rm_.is_valid()) { - // Immediate. - uint32_t rotate_imm; - uint32_t immed_8; - if (MustUseIp(src.rmode_) || - !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) { - // Immediate operand cannot be encoded, load it first to register ip. - RecordRelocInfo(src.rmode_, src.imm32_); - ldr(ip, MemOperand(pc, 0), cond); - msr(fields, Operand(ip), cond); - return; - } - instr = I | rotate_imm*B8 | immed_8; - } else { - ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed - instr = src.rm_.code(); - } - emit(cond | instr | B24 | B21 | fields | 15*B12); -} - - -// Load/Store instructions. -void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) { - if (dst.is(pc)) { - WriteRecordedPositions(); - } - addrmod2(cond | B26 | L, dst, src); - - // Eliminate pattern: push(r), pop(r) - // str(r, MemOperand(sp, 4, NegPreIndex), al) - // ldr(r, MemOperand(sp, 4, PostIndex), al) - // Both instructions can be eliminated. 
- int pattern_size = 2 * kInstrSize; - if (FLAG_push_pop_elimination && - last_bound_pos_ <= (pc_offset() - pattern_size) && - reloc_info_writer.last_pc() <= (pc_ - pattern_size) && - // Pattern. - instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) && - instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) { - pc_ -= 2 * kInstrSize; - if (FLAG_print_push_pop_elimination) { - PrintF("%x push/pop (same reg) eliminated\n", pc_offset()); - } - } -} - - -void Assembler::str(Register src, const MemOperand& dst, Condition cond) { - addrmod2(cond | B26, src, dst); - - // Eliminate pattern: pop(), push(r) - // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al - // -> str r, [sp, 0], al - int pattern_size = 2 * kInstrSize; - if (FLAG_push_pop_elimination && - last_bound_pos_ <= (pc_offset() - pattern_size) && - reloc_info_writer.last_pc() <= (pc_ - pattern_size) && - // Pattern. - instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) && - instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) { - pc_ -= 2 * kInstrSize; - emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12); - if (FLAG_print_push_pop_elimination) { - PrintF("%x pop()/push(reg) eliminated\n", pc_offset()); - } - } -} - - -void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) { - addrmod2(cond | B26 | B | L, dst, src); -} - - -void Assembler::strb(Register src, const MemOperand& dst, Condition cond) { - addrmod2(cond | B26 | B, src, dst); -} - - -void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) { - addrmod3(cond | L | B7 | H | B4, dst, src); -} - - -void Assembler::strh(Register src, const MemOperand& dst, Condition cond) { - addrmod3(cond | B7 | H | B4, src, dst); -} - - -void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) { - addrmod3(cond | L | B7 | S6 | B4, dst, src); -} - - -void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) { - addrmod3(cond | L | B7 | S6 | H | B4, dst, src); -} - - -// Load/Store multiple instructions. -void Assembler::ldm(BlockAddrMode am, - Register base, - RegList dst, - Condition cond) { - // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable. - ASSERT(base.is(sp) || (dst & sp.bit()) == 0); - - addrmod4(cond | B27 | am | L, base, dst); - - // Emit the constant pool after a function return implemented by ldm ..{..pc}. - if (cond == al && (dst & pc.bit()) != 0) { - // There is a slight chance that the ldm instruction was actually a call, - // in which case it would be wrong to return into the constant pool; we - // recognize this case by checking if the emission of the pool was blocked - // at the pc of the ldm instruction by a mov lr, pc instruction; if this is - // the case, we emit a jump over the pool. - CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize); - } -} - - -void Assembler::stm(BlockAddrMode am, - Register base, - RegList src, - Condition cond) { - addrmod4(cond | B27 | am, base, src); -} - - -// Semaphore instructions. 
-void Assembler::swp(Register dst, Register src, Register base, Condition cond) { - ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc)); - ASSERT(!dst.is(base) && !src.is(base)); - emit(cond | P | base.code()*B16 | dst.code()*B12 | - B7 | B4 | src.code()); -} - - -void Assembler::swpb(Register dst, - Register src, - Register base, - Condition cond) { - ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc)); - ASSERT(!dst.is(base) && !src.is(base)); - emit(cond | P | B | base.code()*B16 | dst.code()*B12 | - B7 | B4 | src.code()); -} - - -// Exception-generating instructions and debugging support. -void Assembler::stop(const char* msg) { -#if !defined(__arm__) - // The simulator handles these special instructions and stops execution. - emit(15 << 28 | ((intptr_t) msg)); -#else - // Just issue a simple break instruction for now. Alternatively we could use - // the swi(0x9f0001) instruction on Linux. - bkpt(0); -#endif -} - - -void Assembler::bkpt(uint32_t imm16) { // v5 and above - ASSERT(is_uint16(imm16)); - emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf)); -} - - -void Assembler::swi(uint32_t imm24, Condition cond) { - ASSERT(is_uint24(imm24)); - emit(cond | 15*B24 | imm24); -} - - -// Coprocessor instructions. -void Assembler::cdp(Coprocessor coproc, - int opcode_1, - CRegister crd, - CRegister crn, - CRegister crm, - int opcode_2, - Condition cond) { - ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2)); - emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 | - crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code()); -} - - -void Assembler::cdp2(Coprocessor coproc, - int opcode_1, - CRegister crd, - CRegister crn, - CRegister crm, - int opcode_2) { // v5 and above - cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast(nv)); -} - - -void Assembler::mcr(Coprocessor coproc, - int opcode_1, - Register rd, - CRegister crn, - CRegister crm, - int opcode_2, - Condition cond) { - ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2)); - emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 | - rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code()); -} - - -void Assembler::mcr2(Coprocessor coproc, - int opcode_1, - Register rd, - CRegister crn, - CRegister crm, - int opcode_2) { // v5 and above - mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast(nv)); -} - - -void Assembler::mrc(Coprocessor coproc, - int opcode_1, - Register rd, - CRegister crn, - CRegister crm, - int opcode_2, - Condition cond) { - ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2)); - emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 | - rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code()); -} - - -void Assembler::mrc2(Coprocessor coproc, - int opcode_1, - Register rd, - CRegister crn, - CRegister crm, - int opcode_2) { // v5 and above - mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast(nv)); -} - - -void Assembler::ldc(Coprocessor coproc, - CRegister crd, - const MemOperand& src, - LFlag l, - Condition cond) { - addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src); -} - - -void Assembler::ldc(Coprocessor coproc, - CRegister crd, - Register rn, - int option, - LFlag l, - Condition cond) { - // Unindexed addressing. 
- ASSERT(is_uint8(option)); - emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 | - coproc*B8 | (option & 255)); -} - - -void Assembler::ldc2(Coprocessor coproc, - CRegister crd, - const MemOperand& src, - LFlag l) { // v5 and above - ldc(coproc, crd, src, l, static_cast<Condition>(nv)); -} - - -void Assembler::ldc2(Coprocessor coproc, - CRegister crd, - Register rn, - int option, - LFlag l) { // v5 and above - ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv)); -} - - -void Assembler::stc(Coprocessor coproc, - CRegister crd, - const MemOperand& dst, - LFlag l, - Condition cond) { - addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst); -} - - -void Assembler::stc(Coprocessor coproc, - CRegister crd, - Register rn, - int option, - LFlag l, - Condition cond) { - // Unindexed addressing. - ASSERT(is_uint8(option)); - emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 | - coproc*B8 | (option & 255)); -} - - -void Assembler::stc2(Coprocessor - coproc, CRegister crd, - const MemOperand& dst, - LFlag l) { // v5 and above - stc(coproc, crd, dst, l, static_cast<Condition>(nv)); -} - - -void Assembler::stc2(Coprocessor coproc, - CRegister crd, - Register rn, - int option, - LFlag l) { // v5 and above - stc(coproc, crd, rn, option, l, static_cast<Condition>(nv)); -} - - -// Support for VFP. -void Assembler::vldr(const DwVfpRegister dst, - const Register base, - int offset, - const Condition cond) { - // Ddst = MEM(Rbase + offset). - // Instruction details available in ARM DDI 0406A, A8-628. - // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) | - // Vdst(15-12) | 1011(11-8) | offset - ASSERT(CpuFeatures::IsEnabled(VFP3)); - ASSERT(offset % 4 == 0); - emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 | - 0xB*B8 | ((offset / 4) & 255)); -} - - -void Assembler::vstr(const DwVfpRegister src, - const Register base, - int offset, - const Condition cond) { - // MEM(Rbase + offset) = Dsrc. - // Instruction details available in ARM DDI 0406A, A8-786. - // cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) | - // Vsrc(15-12) | 1011(11-8) | (offset/4) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - ASSERT(offset % 4 == 0); - emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 | - 0xB*B8 | ((offset / 4) & 255)); -} - - -void Assembler::vmov(const DwVfpRegister dst, - const Register src1, - const Register src2, - const Condition cond) { - // Dm = <Rt,Rt2>. - // Instruction details available in ARM DDI 0406A, A8-646. - // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) | - // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm - ASSERT(CpuFeatures::IsEnabled(VFP3)); - ASSERT(!src1.is(pc) && !src2.is(pc)); - emit(cond | 0xC*B24 | B22 | src2.code()*B16 | - src1.code()*B12 | 0xB*B8 | B4 | dst.code()); -} - - -void Assembler::vmov(const Register dst1, - const Register dst2, - const DwVfpRegister src, - const Condition cond) { - // <Rt,Rt2> = Dm. - // Instruction details available in ARM DDI 0406A, A8-646. - // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) | - // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm - ASSERT(CpuFeatures::IsEnabled(VFP3)); - ASSERT(!dst1.is(pc) && !dst2.is(pc)); - emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 | - dst1.code()*B12 | 0xB*B8 | B4 | src.code()); -} - - -void Assembler::vmov(const SwVfpRegister dst, - const Register src, - const Condition cond) { - // Sn = Rt. - // Instruction details available in ARM DDI 0406A, A8-642. 
- // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) | - // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - ASSERT(!src.is(pc)); - emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 | - src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4); -} - - -void Assembler::vmov(const Register dst, - const SwVfpRegister src, - const Condition cond) { - // Rt = Sn. - // Instruction details available in ARM DDI 0406A, A8-642. - // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) | - // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - ASSERT(!dst.is(pc)); - emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 | - dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4); -} - - -void Assembler::vcvt(const DwVfpRegister dst, - const SwVfpRegister src, - const Condition cond) { - // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd). - // Instruction details available in ARM DDI 0406A, A8-576. - // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 | - dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 | - (0x1 & src.code())*B5 | (src.code() >> 1)); -} - - -void Assembler::vcvt(const SwVfpRegister dst, - const DwVfpRegister src, - const Condition cond) { - // Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd). - // Instruction details available in ARM DDI 0406A, A8-576. - // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)| - // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 | - 0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 | - 0x5*B9 | B8 | B7 | B6 | src.code()); -} - - -void Assembler::vadd(const DwVfpRegister dst, - const DwVfpRegister src1, - const DwVfpRegister src2, - const Condition cond) { - // Dd = vadd(Dn, Dm) double precision floating point addition. - // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. - // Instruction details available in ARM DDI 0406A, A8-536. - // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 | - dst.code()*B12 | 0x5*B9 | B8 | src2.code()); -} - - -void Assembler::vsub(const DwVfpRegister dst, - const DwVfpRegister src1, - const DwVfpRegister src2, - const Condition cond) { - // Dd = vsub(Dn, Dm) double precision floating point subtraction. - // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. - // Instruction details available in ARM DDI 0406A, A8-784. - // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 | - dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code()); -} - - -void Assembler::vmul(const DwVfpRegister dst, - const DwVfpRegister src1, - const DwVfpRegister src2, - const Condition cond) { - // Dd = vmul(Dn, Dm) double precision floating point multiplication. - // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. - // Instruction details available in ARM DDI 0406A, A8-784. 
- // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 | - dst.code()*B12 | 0x5*B9 | B8 | src2.code()); -} - - -void Assembler::vdiv(const DwVfpRegister dst, - const DwVfpRegister src1, - const DwVfpRegister src2, - const Condition cond) { - // Dd = vdiv(Dn, Dm) double precision floating point division. - // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. - // Instruction details available in ARM DDI 0406A, A8-584. - // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(cond | 0xE*B24 | B23 | src1.code()*B16 | - dst.code()*B12 | 0x5*B9 | B8 | src2.code()); -} - - -void Assembler::vcmp(const DwVfpRegister src1, - const DwVfpRegister src2, - const SBit s, - const Condition cond) { - // vcmp(Dd, Dm) double precision floating point comparison. - // Instruction details available in ARM DDI 0406A, A8-570. - // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | - src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code()); -} - - -void Assembler::vmrs(Register dst, Condition cond) { - // Instruction details available in ARM DDI 0406A, A8-652. - // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) | - // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(cond | 0xE*B24 | 0xF*B20 | B16 | - dst.code()*B12 | 0xA*B8 | B4); -} - - -// Pseudo instructions. -void Assembler::lea(Register dst, - const MemOperand& x, - SBit s, - Condition cond) { - int am = x.am_; - if (!x.rm_.is_valid()) { - // Immediate offset. - if ((am & P) == 0) // post indexing - mov(dst, Operand(x.rn_), s, cond); - else if ((am & U) == 0) // negative indexing - sub(dst, x.rn_, Operand(x.offset_), s, cond); - else - add(dst, x.rn_, Operand(x.offset_), s, cond); - } else { - // Register offset (shift_imm_ and shift_op_ are 0) or scaled - // register offset the constructors make sure than both shift_imm_ - // and shift_op_ are initialized. - ASSERT(!x.rm_.is(pc)); - if ((am & P) == 0) // post indexing - mov(dst, Operand(x.rn_), s, cond); - else if ((am & U) == 0) // negative indexing - sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond); - else - add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond); - } -} - - -bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { - uint32_t dummy1; - uint32_t dummy2; - return fits_shifter(imm32, &dummy1, &dummy2, NULL); -} - - -void Assembler::BlockConstPoolFor(int instructions) { - BlockConstPoolBefore(pc_offset() + instructions * kInstrSize); -} - - -// Debugging. 
-void Assembler::RecordJSReturn() { - WriteRecordedPositions(); - CheckBuffer(); - RecordRelocInfo(RelocInfo::JS_RETURN); -} - - -void Assembler::RecordComment(const char* msg) { - if (FLAG_debug_code) { - CheckBuffer(); - RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast(msg)); - } -} - - -void Assembler::RecordPosition(int pos) { - if (pos == RelocInfo::kNoPosition) return; - ASSERT(pos >= 0); - current_position_ = pos; -} - - -void Assembler::RecordStatementPosition(int pos) { - if (pos == RelocInfo::kNoPosition) return; - ASSERT(pos >= 0); - current_statement_position_ = pos; -} - - -void Assembler::WriteRecordedPositions() { - // Write the statement position if it is different from what was written last - // time. - if (current_statement_position_ != written_statement_position_) { - CheckBuffer(); - RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_); - written_statement_position_ = current_statement_position_; - } - - // Write the position if it is different from what was written last time and - // also different from the written statement position. - if (current_position_ != written_position_ && - current_position_ != written_statement_position_) { - CheckBuffer(); - RecordRelocInfo(RelocInfo::POSITION, current_position_); - written_position_ = current_position_; - } -} - - -void Assembler::GrowBuffer() { - if (!own_buffer_) FATAL("external code buffer is too small"); - - // Compute new buffer size. - CodeDesc desc; // the new buffer - if (buffer_size_ < 4*KB) { - desc.buffer_size = 4*KB; - } else if (buffer_size_ < 1*MB) { - desc.buffer_size = 2*buffer_size_; - } else { - desc.buffer_size = buffer_size_ + 1*MB; - } - CHECK_GT(desc.buffer_size, 0); // no overflow - - // Setup new buffer. - desc.buffer = NewArray(desc.buffer_size); - - desc.instr_size = pc_offset(); - desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); - - // Copy the data. - int pc_delta = desc.buffer - buffer_; - int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_); - memmove(desc.buffer, buffer_, desc.instr_size); - memmove(reloc_info_writer.pos() + rc_delta, - reloc_info_writer.pos(), desc.reloc_size); - - // Switch buffers. - DeleteArray(buffer_); - buffer_ = desc.buffer; - buffer_size_ = desc.buffer_size; - pc_ += pc_delta; - reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, - reloc_info_writer.last_pc() + pc_delta); - - // None of our relocation types are pc relative pointing outside the code - // buffer nor pc absolute pointing inside the code buffer, so there is no need - // to relocate any emitted relocation entries. - - // Relocate pending relocation entries. - for (int i = 0; i < num_prinfo_; i++) { - RelocInfo& rinfo = prinfo_[i]; - ASSERT(rinfo.rmode() != RelocInfo::COMMENT && - rinfo.rmode() != RelocInfo::POSITION); - if (rinfo.rmode() != RelocInfo::JS_RETURN) { - rinfo.set_pc(rinfo.pc() + pc_delta); - } - } -} - - -void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { - RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants - if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) { - // Adjust code for new modes. - ASSERT(RelocInfo::IsJSReturn(rmode) - || RelocInfo::IsComment(rmode) - || RelocInfo::IsPosition(rmode)); - // These modes do not need an entry in the constant pool. 
- } else { - ASSERT(num_prinfo_ < kMaxNumPRInfo); - prinfo_[num_prinfo_++] = rinfo; - // Make sure the constant pool is not emitted in place of the next - // instruction for which we just recorded relocation info. - BlockConstPoolBefore(pc_offset() + kInstrSize); - } - if (rinfo.rmode() != RelocInfo::NONE) { - // Don't record external references unless the heap will be serialized. - if (rmode == RelocInfo::EXTERNAL_REFERENCE) { -#ifdef DEBUG - if (!Serializer::enabled()) { - Serializer::TooLateToEnableNow(); - } -#endif - if (!Serializer::enabled() && !FLAG_debug_code) { - return; - } - } - ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here - reloc_info_writer.Write(&rinfo); - } -} - - -void Assembler::CheckConstPool(bool force_emit, bool require_jump) { - // Calculate the offset of the next check. It will be overwritten - // when a const pool is generated or when const pools are being - // blocked for a specific range. - next_buffer_check_ = pc_offset() + kCheckConstInterval; - - // There is nothing to do if there are no pending relocation info entries. - if (num_prinfo_ == 0) return; - - // We emit a constant pool at regular intervals of about kDistBetweenPools - // or when requested by parameter force_emit (e.g. after each function). - // We prefer not to emit a jump unless the max distance is reached or if we - // are running low on slots, which can happen if a lot of constants are being - // emitted (e.g. --debug-code and many static references). - int dist = pc_offset() - last_const_pool_end_; - if (!force_emit && dist < kMaxDistBetweenPools && - (require_jump || dist < kDistBetweenPools) && - // TODO(1236125): Cleanup the "magic" number below. We know that - // the code generation will test every kCheckConstIntervalInst. - // Thus we are safe as long as we generate less than 7 constant - // entries per instruction. - (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) { - return; - } - - // If we did not return by now, we need to emit the constant pool soon. - - // However, some small sequences of instructions must not be broken up by the - // insertion of a constant pool; such sequences are protected by setting - // no_const_pool_before_, which is checked here. Also, recursive calls to - // CheckConstPool are blocked by no_const_pool_before_. - if (pc_offset() < no_const_pool_before_) { - // Emission is currently blocked; make sure we try again as soon as - // possible. - next_buffer_check_ = no_const_pool_before_; - - // Something is wrong if emission is forced and blocked at the same time. - ASSERT(!force_emit); - return; - } - - int jump_instr = require_jump ? kInstrSize : 0; - - // Check that the code buffer is large enough before emitting the constant - // pool and relocation information (include the jump over the pool and the - // constant pool marker). - int max_needed_space = - jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize); - while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer(); - - // Block recursive calls to CheckConstPool. - BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize + - num_prinfo_*kInstrSize); - // Don't bother to check for the emit calls below. - next_buffer_check_ = no_const_pool_before_; - - // Emit jump over constant pool if necessary. - Label after_pool; - if (require_jump) b(&after_pool); - - RecordComment("[ Constant Pool"); - - // Put down constant pool marker "Undefined instruction" as specified by - // A3.1 Instruction set encoding. 
- emit(0x03000000 | num_prinfo_); - - // Emit constant pool entries. - for (int i = 0; i < num_prinfo_; i++) { - RelocInfo& rinfo = prinfo_[i]; - ASSERT(rinfo.rmode() != RelocInfo::COMMENT && - rinfo.rmode() != RelocInfo::POSITION && - rinfo.rmode() != RelocInfo::STATEMENT_POSITION); - Instr instr = instr_at(rinfo.pc()); - - // Instruction to patch must be a ldr/str [pc, #offset]. - // P and U set, B and W clear, Rn == pc, offset12 still 0. - ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) == - (2*B25 | P | U | pc.code()*B16)); - int delta = pc_ - rinfo.pc() - 8; - ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32 - if (delta < 0) { - instr &= ~U; - delta = -delta; - } - ASSERT(is_uint12(delta)); - instr_at_put(rinfo.pc(), instr + delta); - emit(rinfo.data()); - } - num_prinfo_ = 0; - last_const_pool_end_ = pc_offset(); - - RecordComment("]"); - - if (after_pool.is_linked()) { - bind(&after_pool); - } - - // Since a constant pool was just emitted, move the check offset forward by - // the standard interval. - next_buffer_check_ = pc_offset() + kCheckConstInterval; -} - - -} } // namespace v8::internal diff --git a/deps/v8/src/arm/assembler-thumb2.h b/deps/v8/src/arm/assembler-thumb2.h deleted file mode 100644 index 2da11389ac..0000000000 --- a/deps/v8/src/arm/assembler-thumb2.h +++ /dev/null @@ -1,1036 +0,0 @@ -// Copyright (c) 1994-2006 Sun Microsystems Inc. -// All Rights Reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// -// - Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// - Redistribution in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the -// distribution. -// -// - Neither the name of Sun Microsystems or the names of contributors may -// be used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED -// OF THE POSSIBILITY OF SUCH DAMAGE. - -// The original source code covered by the above license above has been -// modified significantly by Google Inc. -// Copyright 2010 the V8 project authors. All rights reserved. - -// A light-weight ARM Assembler -// Generates user mode instructions for the ARM architecture up to version 5 - -#ifndef V8_ARM_ASSEMBLER_THUMB2_H_ -#define V8_ARM_ASSEMBLER_THUMB2_H_ -#include <stdio.h> -#include "assembler.h" -#include "serialize.h" - -namespace v8 { -namespace internal { - -// CPU Registers. 
-// -// 1) We would prefer to use an enum, but enum values are assignment- -// compatible with int, which has caused code-generation bugs. -// -// 2) We would prefer to use a class instead of a struct but we don't like -// the register initialization to depend on the particular initialization -// order (which appears to be different on OS X, Linux, and Windows for the -// installed versions of C++ we tried). Using a struct permits C-style -// "initialization". Also, the Register objects cannot be const as this -// forces initialization stubs in MSVC, making us dependent on initialization -// order. -// -// 3) By not using an enum, we are possibly preventing the compiler from -// doing certain constant folds, which may significantly reduce the -// code generated for some assembly instructions (because they boil down -// to a few constants). If this is a problem, we could change the code -// such that we use an enum in optimized mode, and the struct in debug -// mode. This way we get the compile-time error checking in debug mode -// and best performance in optimized code. -// -// Core register -struct Register { - bool is_valid() const { return 0 <= code_ && code_ < 16; } - bool is(Register reg) const { return code_ == reg.code_; } - int code() const { - ASSERT(is_valid()); - return code_; - } - int bit() const { - ASSERT(is_valid()); - return 1 << code_; - } - - // Unfortunately we can't make this private in a struct. - int code_; -}; - - -extern Register no_reg; -extern Register r0; -extern Register r1; -extern Register r2; -extern Register r3; -extern Register r4; -extern Register r5; -extern Register r6; -extern Register r7; -extern Register r8; -extern Register r9; -extern Register r10; -extern Register fp; -extern Register ip; -extern Register sp; -extern Register lr; -extern Register pc; - - -// Single word VFP register. -struct SwVfpRegister { - bool is_valid() const { return 0 <= code_ && code_ < 32; } - bool is(SwVfpRegister reg) const { return code_ == reg.code_; } - int code() const { - ASSERT(is_valid()); - return code_; - } - int bit() const { - ASSERT(is_valid()); - return 1 << code_; - } - - int code_; -}; - - -// Double word VFP register. -struct DwVfpRegister { - // Supporting d0 to d15, can be later extended to d31. - bool is_valid() const { return 0 <= code_ && code_ < 16; } - bool is(DwVfpRegister reg) const { return code_ == reg.code_; } - int code() const { - ASSERT(is_valid()); - return code_; - } - int bit() const { - ASSERT(is_valid()); - return 1 << code_; - } - - int code_; -}; - - -// Support for VFP registers s0 to s31 (d0 to d15). -// Note that "s(N):s(N+1)" is the same as "d(N/2)". 
-extern SwVfpRegister s0; -extern SwVfpRegister s1; -extern SwVfpRegister s2; -extern SwVfpRegister s3; -extern SwVfpRegister s4; -extern SwVfpRegister s5; -extern SwVfpRegister s6; -extern SwVfpRegister s7; -extern SwVfpRegister s8; -extern SwVfpRegister s9; -extern SwVfpRegister s10; -extern SwVfpRegister s11; -extern SwVfpRegister s12; -extern SwVfpRegister s13; -extern SwVfpRegister s14; -extern SwVfpRegister s15; -extern SwVfpRegister s16; -extern SwVfpRegister s17; -extern SwVfpRegister s18; -extern SwVfpRegister s19; -extern SwVfpRegister s20; -extern SwVfpRegister s21; -extern SwVfpRegister s22; -extern SwVfpRegister s23; -extern SwVfpRegister s24; -extern SwVfpRegister s25; -extern SwVfpRegister s26; -extern SwVfpRegister s27; -extern SwVfpRegister s28; -extern SwVfpRegister s29; -extern SwVfpRegister s30; -extern SwVfpRegister s31; - -extern DwVfpRegister d0; -extern DwVfpRegister d1; -extern DwVfpRegister d2; -extern DwVfpRegister d3; -extern DwVfpRegister d4; -extern DwVfpRegister d5; -extern DwVfpRegister d6; -extern DwVfpRegister d7; -extern DwVfpRegister d8; -extern DwVfpRegister d9; -extern DwVfpRegister d10; -extern DwVfpRegister d11; -extern DwVfpRegister d12; -extern DwVfpRegister d13; -extern DwVfpRegister d14; -extern DwVfpRegister d15; - - -// Coprocessor register -struct CRegister { - bool is_valid() const { return 0 <= code_ && code_ < 16; } - bool is(CRegister creg) const { return code_ == creg.code_; } - int code() const { - ASSERT(is_valid()); - return code_; - } - int bit() const { - ASSERT(is_valid()); - return 1 << code_; - } - - // Unfortunately we can't make this private in a struct. - int code_; -}; - - -extern CRegister no_creg; -extern CRegister cr0; -extern CRegister cr1; -extern CRegister cr2; -extern CRegister cr3; -extern CRegister cr4; -extern CRegister cr5; -extern CRegister cr6; -extern CRegister cr7; -extern CRegister cr8; -extern CRegister cr9; -extern CRegister cr10; -extern CRegister cr11; -extern CRegister cr12; -extern CRegister cr13; -extern CRegister cr14; -extern CRegister cr15; - - -// Coprocessor number -enum Coprocessor { - p0 = 0, - p1 = 1, - p2 = 2, - p3 = 3, - p4 = 4, - p5 = 5, - p6 = 6, - p7 = 7, - p8 = 8, - p9 = 9, - p10 = 10, - p11 = 11, - p12 = 12, - p13 = 13, - p14 = 14, - p15 = 15 -}; - - -// Condition field in instructions. -enum Condition { - eq = 0 << 28, // Z set equal. - ne = 1 << 28, // Z clear not equal. - nz = 1 << 28, // Z clear not zero. - cs = 2 << 28, // C set carry set. - hs = 2 << 28, // C set unsigned higher or same. - cc = 3 << 28, // C clear carry clear. - lo = 3 << 28, // C clear unsigned lower. - mi = 4 << 28, // N set negative. - pl = 5 << 28, // N clear positive or zero. - vs = 6 << 28, // V set overflow. - vc = 7 << 28, // V clear no overflow. - hi = 8 << 28, // C set, Z clear unsigned higher. - ls = 9 << 28, // C clear or Z set unsigned lower or same. - ge = 10 << 28, // N == V greater or equal. - lt = 11 << 28, // N != V less than. - gt = 12 << 28, // Z clear, N == V greater than. - le = 13 << 28, // Z set or N != V less then or equal - al = 14 << 28 // always. -}; - - -// Returns the equivalent of !cc. -INLINE(Condition NegateCondition(Condition cc)); - - -// Corresponds to transposing the operands of a comparison. 
-inline Condition ReverseCondition(Condition cc) { - switch (cc) { - case lo: - return hi; - case hi: - return lo; - case hs: - return ls; - case ls: - return hs; - case lt: - return gt; - case gt: - return lt; - case ge: - return le; - case le: - return ge; - default: - return cc; - }; -} - - -// Branch hints are not used on the ARM. They are defined so that they can -// appear in shared function signatures, but will be ignored in ARM -// implementations. -enum Hint { no_hint }; - -// Hints are not used on the arm. Negating is trivial. -inline Hint NegateHint(Hint ignored) { return no_hint; } - - -// ----------------------------------------------------------------------------- -// Addressing modes and instruction variants - -// Shifter operand shift operation -enum ShiftOp { - LSL = 0 << 5, - LSR = 1 << 5, - ASR = 2 << 5, - ROR = 3 << 5, - RRX = -1 -}; - - -// Condition code updating mode -enum SBit { - SetCC = 1 << 20, // set condition code - LeaveCC = 0 << 20 // leave condition code unchanged -}; - - -// Status register selection -enum SRegister { - CPSR = 0 << 22, - SPSR = 1 << 22 -}; - - -// Status register fields -enum SRegisterField { - CPSR_c = CPSR | 1 << 16, - CPSR_x = CPSR | 1 << 17, - CPSR_s = CPSR | 1 << 18, - CPSR_f = CPSR | 1 << 19, - SPSR_c = SPSR | 1 << 16, - SPSR_x = SPSR | 1 << 17, - SPSR_s = SPSR | 1 << 18, - SPSR_f = SPSR | 1 << 19 -}; - -// Status register field mask (or'ed SRegisterField enum values) -typedef uint32_t SRegisterFieldMask; - - -// Memory operand addressing mode -enum AddrMode { - // bit encoding P U W - Offset = (8|4|0) << 21, // offset (without writeback to base) - PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback - PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback - NegOffset = (8|0|0) << 21, // negative offset (without writeback to base) - NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback - NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback -}; - - -// Load/store multiple addressing mode -enum BlockAddrMode { - // bit encoding P U W - da = (0|0|0) << 21, // decrement after - ia = (0|4|0) << 21, // increment after - db = (8|0|0) << 21, // decrement before - ib = (8|4|0) << 21, // increment before - da_w = (0|0|1) << 21, // decrement after with writeback to base - ia_w = (0|4|1) << 21, // increment after with writeback to base - db_w = (8|0|1) << 21, // decrement before with writeback to base - ib_w = (8|4|1) << 21 // increment before with writeback to base -}; - - -// Coprocessor load/store operand size -enum LFlag { - Long = 1 << 22, // long load/store coprocessor - Short = 0 << 22 // short load/store coprocessor -}; - - -// ----------------------------------------------------------------------------- -// Machine instruction Operands - -// Class Operand represents a shifter operand in data processing instructions -class Operand BASE_EMBEDDED { - public: - // immediate - INLINE(explicit Operand(int32_t immediate, - RelocInfo::Mode rmode = RelocInfo::NONE)); - INLINE(explicit Operand(const ExternalReference& f)); - INLINE(explicit Operand(const char* s)); - explicit Operand(Handle handle); - INLINE(explicit Operand(Smi* value)); - - // rm - INLINE(explicit Operand(Register rm)); - - // rm shift_imm - explicit Operand(Register rm, ShiftOp shift_op, int shift_imm); - - // rm rs - explicit Operand(Register rm, ShiftOp shift_op, Register rs); - - // Return true if this is a register operand. 
- INLINE(bool is_reg() const); - - Register rm() const { return rm_; } - - private: - Register rm_; - Register rs_; - ShiftOp shift_op_; - int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg - int32_t imm32_; // valid if rm_ == no_reg - RelocInfo::Mode rmode_; - - friend class Assembler; -}; - - -// Class MemOperand represents a memory operand in load and store instructions -class MemOperand BASE_EMBEDDED { - public: - // [rn +/- offset] Offset/NegOffset - // [rn +/- offset]! PreIndex/NegPreIndex - // [rn], +/- offset PostIndex/NegPostIndex - // offset is any signed 32-bit value; offset is first loaded to register ip if - // it does not fit the addressing mode (12-bit unsigned and sign bit) - explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset); - - // [rn +/- rm] Offset/NegOffset - // [rn +/- rm]! PreIndex/NegPreIndex - // [rn], +/- rm PostIndex/NegPostIndex - explicit MemOperand(Register rn, Register rm, AddrMode am = Offset); - - // [rn +/- rm shift_imm] Offset/NegOffset - // [rn +/- rm shift_imm]! PreIndex/NegPreIndex - // [rn], +/- rm shift_imm PostIndex/NegPostIndex - explicit MemOperand(Register rn, Register rm, - ShiftOp shift_op, int shift_imm, AddrMode am = Offset); - - private: - Register rn_; // base - Register rm_; // register offset - int32_t offset_; // valid if rm_ == no_reg - ShiftOp shift_op_; - int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg - AddrMode am_; // bits P, U, and W - - friend class Assembler; -}; - -// CpuFeatures keeps track of which features are supported by the target CPU. -// Supported features must be enabled by a Scope before use. -class CpuFeatures : public AllStatic { - public: - // Detect features of the target CPU. Set safe defaults if the serializer - // is enabled (snapshots must be portable). - static void Probe(); - - // Check whether a feature is supported by the target CPU. - static bool IsSupported(CpuFeature f) { - if (f == VFP3 && !FLAG_enable_vfp3) return false; - return (supported_ & (1u << f)) != 0; - } - - // Check whether a feature is currently enabled. - static bool IsEnabled(CpuFeature f) { - return (enabled_ & (1u << f)) != 0; - } - - // Enable a specified feature within a scope. - class Scope BASE_EMBEDDED { -#ifdef DEBUG - public: - explicit Scope(CpuFeature f) { - ASSERT(CpuFeatures::IsSupported(f)); - ASSERT(!Serializer::enabled() || - (found_by_runtime_probing_ & (1u << f)) == 0); - old_enabled_ = CpuFeatures::enabled_; - CpuFeatures::enabled_ |= 1u << f; - } - ~Scope() { CpuFeatures::enabled_ = old_enabled_; } - private: - unsigned old_enabled_; -#else - public: - explicit Scope(CpuFeature f) {} -#endif - }; - - private: - static unsigned supported_; - static unsigned enabled_; - static unsigned found_by_runtime_probing_; -}; - - -typedef int32_t Instr; - - -extern const Instr kMovLrPc; -extern const Instr kLdrPCPattern; - - -class Assembler : public Malloced { - public: - // Create an assembler. Instructions and relocation information are emitted - // into a buffer, with the instructions starting from the beginning and the - // relocation information starting from the end of the buffer. See CodeDesc - // for a detailed comment on the layout (globals.h). - // - // If the provided buffer is NULL, the assembler allocates and grows its own - // buffer, and buffer_size determines the initial buffer size. The buffer is - // owned by the assembler and deallocated upon destruction of the assembler. 
- // - // If the provided buffer is not NULL, the assembler uses the provided buffer - // for code generation and assumes its size to be buffer_size. If the buffer - // is too small, a fatal error occurs. No deallocation of the buffer is done - // upon destruction of the assembler. - Assembler(void* buffer, int buffer_size); - ~Assembler(); - - // GetCode emits any pending (non-emitted) code and fills the descriptor - // desc. GetCode() is idempotent; it returns the same result if no other - // Assembler functions are invoked in between GetCode() calls. - void GetCode(CodeDesc* desc); - - // Label operations & relative jumps (PPUM Appendix D) - // - // Takes a branch opcode (cc) and a label (L) and generates - // either a backward branch or a forward branch and links it - // to the label fixup chain. Usage: - // - // Label L; // unbound label - // j(cc, &L); // forward branch to unbound label - // bind(&L); // bind label to the current pc - // j(cc, &L); // backward branch to bound label - // bind(&L); // illegal: a label may be bound only once - // - // Note: The same Label can be used for forward and backward branches - // but it may be bound only once. - - void bind(Label* L); // binds an unbound label L to the current code position - - // Returns the branch offset to the given label from the current code position - // Links the label to the current position if it is still unbound - // Manages the jump elimination optimization if the second parameter is true. - int branch_offset(Label* L, bool jump_elimination_allowed); - - // Puts a labels target address at the given position. - // The high 8 bits are set to zero. - void label_at_put(Label* L, int at_offset); - - // Return the address in the constant pool of the code target address used by - // the branch/call instruction at pc. - INLINE(static Address target_address_address_at(Address pc)); - - // Read/Modify the code target address in the branch/call instruction at pc. - INLINE(static Address target_address_at(Address pc)); - INLINE(static void set_target_address_at(Address pc, Address target)); - - // This sets the branch destination (which is in the constant pool on ARM). - // This is for calls and branches within generated code. - inline static void set_target_at(Address constant_pool_entry, Address target); - - // This sets the branch destination (which is in the constant pool on ARM). - // This is for calls and branches to runtime code. - inline static void set_external_target_at(Address constant_pool_entry, - Address target) { - set_target_at(constant_pool_entry, target); - } - - // Here we are patching the address in the constant pool, not the actual call - // instruction. The address in the constant pool is the same size as a - // pointer. - static const int kCallTargetSize = kPointerSize; - static const int kExternalTargetSize = kPointerSize; - - // Size of an instruction. - static const int kInstrSize = sizeof(Instr); - - // Distance between the instruction referring to the address of the call - // target (ldr pc, [target addr in const pool]) and the return address - static const int kCallTargetAddressOffset = kInstrSize; - - // Distance between start of patched return sequence and the emitted address - // to jump to. - static const int kPatchReturnSequenceAddressOffset = kInstrSize; - - // Difference between address of current opcode and value read from pc - // register. 
- static const int kPcLoadDelta = 8; - - static const int kJSReturnSequenceLength = 4; - - // --------------------------------------------------------------------------- - // Code generation - - // Insert the smallest number of nop instructions - // possible to align the pc offset to a multiple - // of m. m must be a power of 2 (>= 4). - void Align(int m); - - // Branch instructions - void b(int branch_offset, Condition cond = al); - void bl(int branch_offset, Condition cond = al); - void blx(int branch_offset); // v5 and above - void blx(Register target, Condition cond = al); // v5 and above - void bx(Register target, Condition cond = al); // v5 and above, plus v4t - - // Convenience branch instructions using labels - void b(Label* L, Condition cond = al) { - b(branch_offset(L, cond == al), cond); - } - void b(Condition cond, Label* L) { b(branch_offset(L, cond == al), cond); } - void bl(Label* L, Condition cond = al) { bl(branch_offset(L, false), cond); } - void bl(Condition cond, Label* L) { bl(branch_offset(L, false), cond); } - void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above - - // Data-processing instructions - void ubfx(Register dst, Register src1, const Operand& src2, - const Operand& src3, Condition cond = al); - - void and_(Register dst, Register src1, const Operand& src2, - SBit s = LeaveCC, Condition cond = al); - - void eor(Register dst, Register src1, const Operand& src2, - SBit s = LeaveCC, Condition cond = al); - - void sub(Register dst, Register src1, const Operand& src2, - SBit s = LeaveCC, Condition cond = al); - void sub(Register dst, Register src1, Register src2, - SBit s = LeaveCC, Condition cond = al) { - sub(dst, src1, Operand(src2), s, cond); - } - - void rsb(Register dst, Register src1, const Operand& src2, - SBit s = LeaveCC, Condition cond = al); - - void add(Register dst, Register src1, const Operand& src2, - SBit s = LeaveCC, Condition cond = al); - - void adc(Register dst, Register src1, const Operand& src2, - SBit s = LeaveCC, Condition cond = al); - - void sbc(Register dst, Register src1, const Operand& src2, - SBit s = LeaveCC, Condition cond = al); - - void rsc(Register dst, Register src1, const Operand& src2, - SBit s = LeaveCC, Condition cond = al); - - void tst(Register src1, const Operand& src2, Condition cond = al); - void tst(Register src1, Register src2, Condition cond = al) { - tst(src1, Operand(src2), cond); - } - - void teq(Register src1, const Operand& src2, Condition cond = al); - - void cmp(Register src1, const Operand& src2, Condition cond = al); - void cmp(Register src1, Register src2, Condition cond = al) { - cmp(src1, Operand(src2), cond); - } - - void cmn(Register src1, const Operand& src2, Condition cond = al); - - void orr(Register dst, Register src1, const Operand& src2, - SBit s = LeaveCC, Condition cond = al); - void orr(Register dst, Register src1, Register src2, - SBit s = LeaveCC, Condition cond = al) { - orr(dst, src1, Operand(src2), s, cond); - } - - void mov(Register dst, const Operand& src, - SBit s = LeaveCC, Condition cond = al); - void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) { - mov(dst, Operand(src), s, cond); - } - - void bic(Register dst, Register src1, const Operand& src2, - SBit s = LeaveCC, Condition cond = al); - - void mvn(Register dst, const Operand& src, - SBit s = LeaveCC, Condition cond = al); - - // Multiply instructions - - void mla(Register dst, Register src1, Register src2, Register srcA, - SBit s = LeaveCC, Condition cond = al); - - void mul(Register dst, 
Register src1, Register src2, - SBit s = LeaveCC, Condition cond = al); - - void smlal(Register dstL, Register dstH, Register src1, Register src2, - SBit s = LeaveCC, Condition cond = al); - - void smull(Register dstL, Register dstH, Register src1, Register src2, - SBit s = LeaveCC, Condition cond = al); - - void umlal(Register dstL, Register dstH, Register src1, Register src2, - SBit s = LeaveCC, Condition cond = al); - - void umull(Register dstL, Register dstH, Register src1, Register src2, - SBit s = LeaveCC, Condition cond = al); - - // Miscellaneous arithmetic instructions - - void clz(Register dst, Register src, Condition cond = al); // v5 and above - - // Status register access instructions - - void mrs(Register dst, SRegister s, Condition cond = al); - void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al); - - // Load/Store instructions - void ldr(Register dst, const MemOperand& src, Condition cond = al); - void str(Register src, const MemOperand& dst, Condition cond = al); - void ldrb(Register dst, const MemOperand& src, Condition cond = al); - void strb(Register src, const MemOperand& dst, Condition cond = al); - void ldrh(Register dst, const MemOperand& src, Condition cond = al); - void strh(Register src, const MemOperand& dst, Condition cond = al); - void ldrsb(Register dst, const MemOperand& src, Condition cond = al); - void ldrsh(Register dst, const MemOperand& src, Condition cond = al); - - // Load/Store multiple instructions - void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al); - void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al); - - // Semaphore instructions - void swp(Register dst, Register src, Register base, Condition cond = al); - void swpb(Register dst, Register src, Register base, Condition cond = al); - - // Exception-generating instructions and debugging support - void stop(const char* msg); - - void bkpt(uint32_t imm16); // v5 and above - void swi(uint32_t imm24, Condition cond = al); - - // Coprocessor instructions - - void cdp(Coprocessor coproc, int opcode_1, - CRegister crd, CRegister crn, CRegister crm, - int opcode_2, Condition cond = al); - - void cdp2(Coprocessor coproc, int opcode_1, - CRegister crd, CRegister crn, CRegister crm, - int opcode_2); // v5 and above - - void mcr(Coprocessor coproc, int opcode_1, - Register rd, CRegister crn, CRegister crm, - int opcode_2 = 0, Condition cond = al); - - void mcr2(Coprocessor coproc, int opcode_1, - Register rd, CRegister crn, CRegister crm, - int opcode_2 = 0); // v5 and above - - void mrc(Coprocessor coproc, int opcode_1, - Register rd, CRegister crn, CRegister crm, - int opcode_2 = 0, Condition cond = al); - - void mrc2(Coprocessor coproc, int opcode_1, - Register rd, CRegister crn, CRegister crm, - int opcode_2 = 0); // v5 and above - - void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src, - LFlag l = Short, Condition cond = al); - void ldc(Coprocessor coproc, CRegister crd, Register base, int option, - LFlag l = Short, Condition cond = al); - - void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src, - LFlag l = Short); // v5 and above - void ldc2(Coprocessor coproc, CRegister crd, Register base, int option, - LFlag l = Short); // v5 and above - - void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst, - LFlag l = Short, Condition cond = al); - void stc(Coprocessor coproc, CRegister crd, Register base, int option, - LFlag l = Short, Condition cond = al); - - void stc2(Coprocessor coproc, CRegister 
crd, const MemOperand& dst, - LFlag l = Short); // v5 and above - void stc2(Coprocessor coproc, CRegister crd, Register base, int option, - LFlag l = Short); // v5 and above - - // Support for VFP. - // All these APIs support S0 to S31 and D0 to D15. - // Currently these APIs do not support extended D registers, i.e, D16 to D31. - // However, some simple modifications can allow - // these APIs to support D16 to D31. - - void vldr(const DwVfpRegister dst, - const Register base, - int offset, // Offset must be a multiple of 4. - const Condition cond = al); - void vstr(const DwVfpRegister src, - const Register base, - int offset, // Offset must be a multiple of 4. - const Condition cond = al); - void vmov(const DwVfpRegister dst, - const Register src1, - const Register src2, - const Condition cond = al); - void vmov(const Register dst1, - const Register dst2, - const DwVfpRegister src, - const Condition cond = al); - void vmov(const SwVfpRegister dst, - const Register src, - const Condition cond = al); - void vmov(const Register dst, - const SwVfpRegister src, - const Condition cond = al); - void vcvt(const DwVfpRegister dst, - const SwVfpRegister src, - const Condition cond = al); - void vcvt(const SwVfpRegister dst, - const DwVfpRegister src, - const Condition cond = al); - - void vadd(const DwVfpRegister dst, - const DwVfpRegister src1, - const DwVfpRegister src2, - const Condition cond = al); - void vsub(const DwVfpRegister dst, - const DwVfpRegister src1, - const DwVfpRegister src2, - const Condition cond = al); - void vmul(const DwVfpRegister dst, - const DwVfpRegister src1, - const DwVfpRegister src2, - const Condition cond = al); - void vdiv(const DwVfpRegister dst, - const DwVfpRegister src1, - const DwVfpRegister src2, - const Condition cond = al); - void vcmp(const DwVfpRegister src1, - const DwVfpRegister src2, - const SBit s = LeaveCC, - const Condition cond = al); - void vmrs(const Register dst, - const Condition cond = al); - - // Pseudo instructions - void nop() { mov(r0, Operand(r0)); } - - void push(Register src, Condition cond = al) { - str(src, MemOperand(sp, 4, NegPreIndex), cond); - } - - void pop(Register dst, Condition cond = al) { - ldr(dst, MemOperand(sp, 4, PostIndex), cond); - } - - void pop() { - add(sp, sp, Operand(kPointerSize)); - } - - // Load effective address of memory operand x into register dst - void lea(Register dst, const MemOperand& x, - SBit s = LeaveCC, Condition cond = al); - - // Jump unconditionally to given label. - void jmp(Label* L) { b(L, al); } - - // Check the code size generated from label to here. - int InstructionsGeneratedSince(Label* l) { - return (pc_offset() - l->pos()) / kInstrSize; - } - - // Check whether an immediate fits an addressing mode 1 instruction. - bool ImmediateFitsAddrMode1Instruction(int32_t imm32); - - // Postpone the generation of the constant pool for the specified number of - // instructions. - void BlockConstPoolFor(int instructions); - - // Debugging - - // Mark address of the ExitJSFrame code. - void RecordJSReturn(); - - // Record a comment relocation entry that can be used by a disassembler. - // Use --debug_code to enable. 
- void RecordComment(const char* msg); - - void RecordPosition(int pos); - void RecordStatementPosition(int pos); - void WriteRecordedPositions(); - - int pc_offset() const { return pc_ - buffer_; } - int current_position() const { return current_position_; } - int current_statement_position() const { return current_statement_position_; } - - protected: - int buffer_space() const { return reloc_info_writer.pos() - pc_; } - - // Read/patch instructions - static Instr instr_at(byte* pc) { return *reinterpret_cast(pc); } - void instr_at_put(byte* pc, Instr instr) { - *reinterpret_cast(pc) = instr; - } - Instr instr_at(int pos) { return *reinterpret_cast(buffer_ + pos); } - void instr_at_put(int pos, Instr instr) { - *reinterpret_cast(buffer_ + pos) = instr; - } - - // Decode branch instruction at pos and return branch target pos - int target_at(int pos); - - // Patch branch instruction at pos to branch to given branch target pos - void target_at_put(int pos, int target_pos); - - // Check if is time to emit a constant pool for pending reloc info entries - void CheckConstPool(bool force_emit, bool require_jump); - - // Block the emission of the constant pool before pc_offset - void BlockConstPoolBefore(int pc_offset) { - if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset; - } - - private: - // Code buffer: - // The buffer into which code and relocation info are generated. - byte* buffer_; - int buffer_size_; - // True if the assembler owns the buffer, false if buffer is external. - bool own_buffer_; - - // Buffer size and constant pool distance are checked together at regular - // intervals of kBufferCheckInterval emitted bytes - static const int kBufferCheckInterval = 1*KB/2; - int next_buffer_check_; // pc offset of next buffer check - - // Code generation - // The relocation writer's position is at least kGap bytes below the end of - // the generated instructions. This is so that multi-instruction sequences do - // not have to check for overflow. The same is true for writes of large - // relocation info entries. - static const int kGap = 32; - byte* pc_; // the program counter; moves forward - - // Constant pool generation - // Pools are emitted in the instruction stream, preferably after unconditional - // jumps or after returns from functions (in dead code locations). - // If a long code sequence does not contain unconditional jumps, it is - // necessary to emit the constant pool before the pool gets too far from the - // location it is accessed from. In this case, we emit a jump over the emitted - // constant pool. - // Constants in the pool may be addresses of functions that gets relocated; - // if so, a relocation info entry is associated to the constant pool entry. - - // Repeated checking whether the constant pool should be emitted is rather - // expensive. By default we only check again once a number of instructions - // has been generated. That also means that the sizing of the buffers is not - // an exact science, and that we rely on some slop to not overrun buffers. - static const int kCheckConstIntervalInst = 32; - static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize; - - - // Pools are emitted after function return and in dead code at (more or less) - // regular intervals of kDistBetweenPools bytes - static const int kDistBetweenPools = 1*KB; - - // Constants in pools are accessed via pc relative addressing, which can - // reach +/-4KB thereby defining a maximum distance between the instruction - // and the accessed constant. 
We satisfy this constraint by limiting the - // distance between pools. - static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval; - - // Emission of the constant pool may be blocked in some code sequences - int no_const_pool_before_; // block emission before this pc offset - - // Keep track of the last emitted pool to guarantee a maximal distance - int last_const_pool_end_; // pc offset following the last constant pool - - // Relocation info generation - // Each relocation is encoded as a variable size value - static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; - RelocInfoWriter reloc_info_writer; - // Relocation info records are also used during code generation as temporary - // containers for constants and code target addresses until they are emitted - // to the constant pool. These pending relocation info records are temporarily - // stored in a separate buffer until a constant pool is emitted. - // If every instruction in a long sequence is accessing the pool, we need one - // pending relocation entry per instruction. - static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize; - RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info - int num_prinfo_; // number of pending reloc info entries in the buffer - - // The bound position, before this we cannot do instruction elimination. - int last_bound_pos_; - - // source position information - int current_position_; - int current_statement_position_; - int written_position_; - int written_statement_position_; - - // Code emission - inline void CheckBuffer(); - void GrowBuffer(); - inline void emit(Instr x); - - // Instruction generation - void addrmod1(Instr instr, Register rn, Register rd, const Operand& x); - void addrmod2(Instr instr, Register rd, const MemOperand& x); - void addrmod3(Instr instr, Register rd, const MemOperand& x); - void addrmod4(Instr instr, Register rn, RegList rl); - void addrmod5(Instr instr, CRegister crd, const MemOperand& x); - - // Labels - void print(Label* L); - void bind_to(Label* L, int pos); - void link_to(Label* L, Label* appendix); - void next(Label* L); - - // Record reloc info for current pc_ - void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); - - friend class RegExpMacroAssemblerARM; - friend class RelocInfo; - friend class CodePatcher; -}; - -} } // namespace v8::internal - -#endif // V8_ARM_ASSEMBLER_THUMB2_H_ diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 5718cb3ce2..1f776562f2 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "codegen-inl.h" #include "debug.h" #include "runtime.h" @@ -130,7 +132,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, // of the JSArray. // result: JSObject // scratch2: start of next object - __ lea(scratch1, MemOperand(result, JSArray::kSize)); + __ add(scratch1, result, Operand(JSArray::kSize)); __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset)); // Clear the heap tag on the elements array. 
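The constant-pool machinery described in the removed assembler header above (pc-relative loads reach roughly +/-4KB, pending RelocInfo entries, pools flushed at intervals and jumped over when emitted mid-sequence) can be summarized with a small sketch. The class, constants and limits below are invented for illustration and simplify the real bookkeeping considerably; this is not the V8 Assembler interface.

    // Illustrative only -- not the V8 Assembler. Constants are collected as
    // pending entries and flushed into the code stream before any of them
    // would drift out of pc-relative load range.
    #include <cstdint>
    #include <vector>

    class ConstPoolSketch {
     public:
      static const int kInstrSize = 4;
      // Hypothetical limit standing in for the +/-4KB reach minus some slop.
      static const int kMaxDistBetweenPools = 4 * 1024 - 2 * 512;

      void Emit(uint32_t instr) {
        code_.push_back(instr);
        MaybeEmitPool();
      }

      // Record a constant that an earlier load will refer to once the pool
      // is emitted and the constant's final offset is known.
      void AddPendingConstant(uint32_t value) { pending_.push_back(value); }

     private:
      void MaybeEmitPool() {
        if (pending_.empty()) return;
        int dist = static_cast<int>(code_.size()) * kInstrSize - last_pool_end_;
        if (dist < kMaxDistBetweenPools) return;
        code_.push_back(0);  // placeholder for a branch over the pool
        for (uint32_t c : pending_) code_.push_back(c);
        pending_.clear();
        last_pool_end_ = static_cast<int>(code_.size()) * kInstrSize;
      }

      std::vector<uint32_t> code_;
      std::vector<uint32_t> pending_;
      int last_pool_end_ = 0;
    };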
@@ -1311,3 +1313,5 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { #undef __ } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 5509830b30..7b62da9d33 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -27,12 +27,15 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "bootstrapper.h" #include "codegen-inl.h" #include "compiler.h" #include "debug.h" #include "ic-inl.h" #include "jsregexp.h" +#include "jump-target-light-inl.h" #include "parser.h" #include "regexp-macro-assembler.h" #include "regexp-stack.h" @@ -40,10 +43,12 @@ #include "runtime.h" #include "scopes.h" #include "virtual-frame-inl.h" +#include "virtual-frame-arm-inl.h" namespace v8 { namespace internal { + #define __ ACCESS_MASM(masm_) static void EmitIdenticalObjectComparison(MacroAssembler* masm, @@ -274,7 +279,7 @@ void CodeGenerator::Generate(CompilationInfo* info) { // Initialize the function return target after the locals are set // up, because it needs the expected frame height from the frame. - function_return_.set_direction(JumpTarget::BIDIRECTIONAL); + function_return_.SetExpectedHeight(); function_return_is_shadowed_ = false; // Generate code to 'execute' declarations and initialize functions @@ -1143,44 +1148,66 @@ void CodeGenerator::SmiOperation(Token::Value op, int shift_value = int_value & 0x1f; // least significant 5 bits DeferredCode* deferred = new DeferredInlineSmiOperation(op, shift_value, false, mode, tos); - __ tst(tos, Operand(kSmiTagMask)); - deferred->Branch(ne); - __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // remove tags + uint32_t problematic_mask = kSmiTagMask; + // For unsigned shift by zero all negative smis are problematic. + if (shift_value == 0 && op == Token::SHR) problematic_mask |= 0x80000000; + __ tst(tos, Operand(problematic_mask)); + deferred->Branch(ne); // Go slow for problematic input. switch (op) { case Token::SHL: { if (shift_value != 0) { - __ mov(scratch, Operand(scratch, LSL, shift_value)); + int adjusted_shift = shift_value - kSmiTagSize; + ASSERT(adjusted_shift >= 0); + if (adjusted_shift != 0) { + __ mov(scratch, Operand(tos, LSL, adjusted_shift)); + // Check that the *signed* result fits in a smi. + __ add(scratch2, scratch, Operand(0x40000000), SetCC); + deferred->Branch(mi); + __ mov(tos, Operand(scratch, LSL, kSmiTagSize)); + } else { + // Check that the *signed* result fits in a smi. + __ add(scratch2, tos, Operand(0x40000000), SetCC); + deferred->Branch(mi); + __ mov(tos, Operand(tos, LSL, kSmiTagSize)); + } } - // check that the *signed* result fits in a smi - __ add(scratch2, scratch, Operand(0x40000000), SetCC); - deferred->Branch(mi); break; } case Token::SHR: { - // LSR by immediate 0 means shifting 32 bits. if (shift_value != 0) { + __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Remove tag. + // LSR by immediate 0 means shifting 32 bits. 
__ mov(scratch, Operand(scratch, LSR, shift_value)); + if (shift_value == 1) { + // check that the *unsigned* result fits in a smi + // neither of the two high-order bits can be set: + // - 0x80000000: high bit would be lost when smi tagging + // - 0x40000000: this number would convert to negative when + // smi tagging these two cases can only happen with shifts + // by 0 or 1 when handed a valid smi + __ tst(scratch, Operand(0xc0000000)); + deferred->Branch(ne); + } + __ mov(tos, Operand(scratch, LSL, kSmiTagSize)); } - // check that the *unsigned* result fits in a smi - // neither of the two high-order bits can be set: - // - 0x80000000: high bit would be lost when smi tagging - // - 0x40000000: this number would convert to negative when - // smi tagging these two cases can only happen with shifts - // by 0 or 1 when handed a valid smi - __ tst(scratch, Operand(0xc0000000)); - deferred->Branch(ne); break; } case Token::SAR: { + // In the ARM instructions set, ASR by immediate 0 means shifting 32 + // bits. if (shift_value != 0) { - // ASR by immediate 0 means shifting 32 bits. - __ mov(scratch, Operand(scratch, ASR, shift_value)); + // Do the shift and the tag removal in one operation. If the shift + // is 31 bits (the highest possible value) then we emit the + // instruction as a shift by 0 which means shift arithmetically by + // 32. + __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f)); + // Put tag back. + __ mov(tos, Operand(tos, LSL, kSmiTagSize)); } break; } default: UNREACHABLE(); } - __ mov(tos, Operand(scratch, LSL, kSmiTagSize)); deferred->BindExit(); frame_->EmitPush(tos); break; @@ -1343,6 +1370,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand, // give us a megamorphic load site. Not super, but it works. LoadAndSpill(applicand); Handle name = Factory::LookupAsciiSymbol("apply"); + frame_->Dup(); frame_->CallLoadIC(name, RelocInfo::CODE_TARGET); frame_->EmitPush(r0); @@ -1549,7 +1577,7 @@ void CodeGenerator::VisitBlock(Block* node) { VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ Block"); CodeForStatementPosition(node); - node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); + node->break_target()->SetExpectedHeight(); VisitStatementsAndSpill(node->statements()); if (node->break_target()->is_linked()) { node->break_target()->Bind(); @@ -1836,7 +1864,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) { VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ SwitchStatement"); CodeForStatementPosition(node); - node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); + node->break_target()->SetExpectedHeight(); LoadAndSpill(node->tag()); @@ -1925,7 +1953,7 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) { VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ DoWhileStatement"); CodeForStatementPosition(node); - node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); + node->break_target()->SetExpectedHeight(); JumpTarget body(JumpTarget::BIDIRECTIONAL); IncrementLoopNesting(); @@ -1935,14 +1963,14 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) { ConditionAnalysis info = AnalyzeCondition(node->cond()); switch (info) { case ALWAYS_TRUE: - node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL); + node->continue_target()->SetExpectedHeight(); node->continue_target()->Bind(); break; case ALWAYS_FALSE: - node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); + 
node->continue_target()->SetExpectedHeight(); break; case DONT_KNOW: - node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); + node->continue_target()->SetExpectedHeight(); body.Bind(); break; } @@ -2006,12 +2034,12 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) { ConditionAnalysis info = AnalyzeCondition(node->cond()); if (info == ALWAYS_FALSE) return; - node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); + node->break_target()->SetExpectedHeight(); IncrementLoopNesting(); // Label the top of the loop with the continue target for the backward // CFG edge. - node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL); + node->continue_target()->SetExpectedHeight(); node->continue_target()->Bind(); if (info == DONT_KNOW) { @@ -2060,17 +2088,17 @@ void CodeGenerator::VisitForStatement(ForStatement* node) { ConditionAnalysis info = AnalyzeCondition(node->cond()); if (info == ALWAYS_FALSE) return; - node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); + node->break_target()->SetExpectedHeight(); IncrementLoopNesting(); // If there is no update statement, label the top of the loop with the // continue target, otherwise with the loop target. JumpTarget loop(JumpTarget::BIDIRECTIONAL); if (node->next() == NULL) { - node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL); + node->continue_target()->SetExpectedHeight(); node->continue_target()->Bind(); } else { - node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); + node->continue_target()->SetExpectedHeight(); loop.Bind(); } @@ -2275,8 +2303,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) { // sp[4] : enumerable // Grab the current frame's height for the break and continue // targets only after all the state is pushed on the frame. - node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); - node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); + node->break_target()->SetExpectedHeight(); + node->continue_target()->SetExpectedHeight(); // Load the current count to r0, load the length to r1. __ ldrd(r0, frame_->ElementAt(0)); @@ -2766,45 +2794,13 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { JumpTarget slow; JumpTarget done; - // Generate fast-case code for variables that might be shadowed by - // eval-introduced variables. Eval is used a lot without - // introducing variables. In those cases, we do not want to - // perform a runtime call for all variables in the scope - // containing the eval. - if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) { - LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow); - // If there was no control flow to slow, we can exit early. - if (!slow.is_linked()) { - frame_->EmitPush(r0); - return; - } - frame_->SpillAll(); - - done.Jump(); - - } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { - frame_->SpillAll(); - Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); - // Only generate the fast case for locals that rewrite to slots. - // This rules out argument loads because eval forces arguments - // access to be through the arguments object. 
- if (potential_slot != NULL) { - __ ldr(r0, - ContextSlotOperandCheckExtensions(potential_slot, - r1, - r2, - &slow)); - if (potential_slot->var()->mode() == Variable::CONST) { - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r0, ip); - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); - } - // There is always control flow to slow from - // ContextSlotOperandCheckExtensions so we have to jump around - // it. - done.Jump(); - } - } + // Generate fast case for loading from slots that correspond to + // local/global variables or arguments unless they are shadowed by + // eval-introduced bindings. + EmitDynamicLoadFromSlotFastCase(slot, + typeof_state, + &slow, + &done); slow.Bind(); VirtualFrame::SpilledScope spilled_scope(frame_); @@ -3014,8 +3010,67 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot, typeof_state == INSIDE_TYPEOF ? RelocInfo::CODE_TARGET : RelocInfo::CODE_TARGET_CONTEXT); - // Drop the global object. The result is in r0. - frame_->Drop(); +} + + +void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot, + TypeofState typeof_state, + JumpTarget* slow, + JumpTarget* done) { + // Generate fast-case code for variables that might be shadowed by + // eval-introduced variables. Eval is used a lot without + // introducing variables. In those cases, we do not want to + // perform a runtime call for all variables in the scope + // containing the eval. + if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) { + LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow); + frame_->SpillAll(); + done->Jump(); + + } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { + frame_->SpillAll(); + Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); + Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite(); + if (potential_slot != NULL) { + // Generate fast case for locals that rewrite to slots. + __ ldr(r0, + ContextSlotOperandCheckExtensions(potential_slot, + r1, + r2, + slow)); + if (potential_slot->var()->mode() == Variable::CONST) { + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r0, ip); + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); + } + done->Jump(); + } else if (rewrite != NULL) { + // Generate fast case for argument loads. + Property* property = rewrite->AsProperty(); + if (property != NULL) { + VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); + Literal* key_literal = property->key()->AsLiteral(); + if (obj_proxy != NULL && + key_literal != NULL && + obj_proxy->IsArguments() && + key_literal->handle()->IsSmi()) { + // Load arguments object if there are no eval-introduced + // variables. Then load the argument from the arguments + // object using keyed load. + __ ldr(r0, + ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(), + r1, + r2, + slow)); + frame_->EmitPush(r0); + __ mov(r1, Operand(key_literal->handle())); + frame_->EmitPush(r1); + EmitKeyedLoad(); + done->Jump(); + } + } + } + } } @@ -3368,7 +3423,6 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) { frame_->Dup(); } EmitNamedLoad(name, var != NULL); - frame_->Drop(); // Receiver is left on the stack. frame_->EmitPush(r0); // Perform the binary operation. @@ -3507,9 +3561,7 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) { // Perform the assignment. It is safe to ignore constants here. 
ASSERT(node->op() != Token::INIT_CONST); CodeForSourcePosition(node->position()); - frame_->PopToR0(); EmitKeyedStore(prop->key()->type()); - frame_->Drop(2); // Key and receiver are left on the stack. frame_->EmitPush(r0); // Stack layout: @@ -3705,52 +3757,26 @@ void CodeGenerator::VisitCall(Call* node) { // ---------------------------------- // JavaScript examples: // - // with (obj) foo(1, 2, 3) // foo is in obj + // with (obj) foo(1, 2, 3) // foo may be in obj. // // function f() {}; // function g() { // eval(...); - // f(); // f could be in extension object + // f(); // f could be in extension object. // } // ---------------------------------- // JumpTargets do not yet support merging frames so the frame must be // spilled when jumping to these targets. - JumpTarget slow; - JumpTarget done; + JumpTarget slow, done; - // Generate fast-case code for variables that might be shadowed by - // eval-introduced variables. Eval is used a lot without - // introducing variables. In those cases, we do not want to - // perform a runtime call for all variables in the scope - // containing the eval. - if (var->mode() == Variable::DYNAMIC_GLOBAL) { - LoadFromGlobalSlotCheckExtensions(var->slot(), NOT_INSIDE_TYPEOF, &slow); - frame_->EmitPush(r0); - LoadGlobalReceiver(r1); - done.Jump(); - - } else if (var->mode() == Variable::DYNAMIC_LOCAL) { - Slot* potential_slot = var->local_if_not_shadowed()->slot(); - // Only generate the fast case for locals that rewrite to slots. - // This rules out argument loads because eval forces arguments - // access to be through the arguments object. - if (potential_slot != NULL) { - __ ldr(r0, - ContextSlotOperandCheckExtensions(potential_slot, - r1, - r2, - &slow)); - if (potential_slot->var()->mode() == Variable::CONST) { - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r0, ip); - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); - } - frame_->EmitPush(r0); - LoadGlobalReceiver(r1); - done.Jump(); - } - } + // Generate fast case for loading functions from slots that + // correspond to local/global variables or arguments unless they + // are shadowed by eval-introduced bindings. + EmitDynamicLoadFromSlotFastCase(var->slot(), + NOT_INSIDE_TYPEOF, + &slow, + &done); slow.Bind(); // Load the function @@ -3764,7 +3790,18 @@ void CodeGenerator::VisitCall(Call* node) { frame_->EmitPush(r0); // function frame_->EmitPush(r1); // receiver - done.Bind(); + // If fast case code has been generated, emit code to push the + // function and receiver and have the slow path jump around this + // code. + if (done.is_linked()) { + JumpTarget call; + call.Jump(); + done.Bind(); + frame_->EmitPush(r0); // function + LoadGlobalReceiver(r1); // receiver + call.Bind(); + } + // Call the function. At this point, everything is spilled but the // function and receiver are in r0 and r1. CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position()); @@ -4892,7 +4929,6 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { #ifdef DEBUG int original_height = frame_->height(); #endif - VirtualFrame::SpilledScope spilled_scope(frame_); Comment cmnt(masm_, "[ CountOperation"); bool is_postfix = node->is_postfix(); @@ -4901,10 +4937,8 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { Variable* var = node->expression()->AsVariableProxy()->AsVariable(); bool is_const = (var != NULL && var->mode() == Variable::CONST); - // Postfix: Make room for the result. 
if (is_postfix) { - __ mov(r0, Operand(0)); - frame_->EmitPush(r0); + frame_->EmitPush(Operand(Smi::FromInt(0))); } // A constant reference is not saved to, so a constant reference is not a @@ -4914,35 +4948,33 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { // Spoof the virtual frame to have the expected height (one higher // than on entry). if (!is_postfix) { - __ mov(r0, Operand(Smi::FromInt(0))); - frame_->EmitPush(r0); + frame_->EmitPush(Operand(Smi::FromInt(0))); } ASSERT_EQ(original_height + 1, frame_->height()); return; } + // This pushes 0, 1 or 2 words on the object to be used later when updating + // the target. It also pushes the current value of the target. target.GetValue(); - frame_->EmitPop(r0); JumpTarget slow; JumpTarget exit; - // Load the value (1) into register r1. - __ mov(r1, Operand(Smi::FromInt(1))); - // Check for smi operand. - __ tst(r0, Operand(kSmiTagMask)); + Register value = frame_->PopToRegister(); + __ tst(value, Operand(kSmiTagMask)); slow.Branch(ne); // Postfix: Store the old value as the result. if (is_postfix) { - __ str(r0, frame_->ElementAt(target.size())); + frame_->SetElementAt(value, target.size()); } // Perform optimistic increment/decrement. if (is_increment) { - __ add(r0, r0, Operand(r1), SetCC); + __ add(value, value, Operand(Smi::FromInt(1)), SetCC); } else { - __ sub(r0, r0, Operand(r1), SetCC); + __ sub(value, value, Operand(Smi::FromInt(1)), SetCC); } // If the increment/decrement didn't overflow, we're done. @@ -4950,41 +4982,50 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { // Revert optimistic increment/decrement. if (is_increment) { - __ sub(r0, r0, Operand(r1)); + __ sub(value, value, Operand(Smi::FromInt(1))); } else { - __ add(r0, r0, Operand(r1)); + __ add(value, value, Operand(Smi::FromInt(1))); } - // Slow case: Convert to number. + // Slow case: Convert to number. At this point the + // value to be incremented is in the value register.. slow.Bind(); + + // Convert the operand to a number. + frame_->EmitPush(value); + { - // Convert the operand to a number. - frame_->EmitPush(r0); + VirtualFrame::SpilledScope spilled(frame_); frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1); - } - if (is_postfix) { - // Postfix: store to result (on the stack). - __ str(r0, frame_->ElementAt(target.size())); - } - // Compute the new value. - __ mov(r1, Operand(Smi::FromInt(1))); - frame_->EmitPush(r0); - frame_->EmitPush(r1); - if (is_increment) { - frame_->CallRuntime(Runtime::kNumberAdd, 2); - } else { - frame_->CallRuntime(Runtime::kNumberSub, 2); + if (is_postfix) { + // Postfix: store to result (on the stack). + __ str(r0, frame_->ElementAt(target.size())); + } + + // Compute the new value. + frame_->EmitPush(r0); + frame_->EmitPush(Operand(Smi::FromInt(1))); + if (is_increment) { + frame_->CallRuntime(Runtime::kNumberAdd, 2); + } else { + frame_->CallRuntime(Runtime::kNumberSub, 2); + } } + __ Move(value, r0); // Store the new value in the target if not const. + // At this point the answer is in the value register. exit.Bind(); - frame_->EmitPush(r0); + frame_->EmitPush(value); + // Set the target with the result, leaving the result on + // top of the stack. Removes the target from the stack if + // it has a non-zero size. if (!is_const) target.SetValue(NOT_CONST_INIT); } // Postfix: Discard the new value and use the old. 
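The count-operation changes above lean on the 32-bit smi encoding (the integer payload shifted left by the one-bit tag, tag value zero) and on the add-with-SetCC/branch-on-overflow idiom for the optimistic increment. A rough standalone C++ rendering of that arithmetic follows; the helper names are local to the example, not V8's.

    // Sketch only: mirrors the tagged arithmetic, not V8's actual helpers.
    #include <cstdint>

    const int kSmiTagSize = 1;  // low bit is the tag, tag value 0

    int32_t SmiFromInt(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiTagSize);
    }

    int32_t IntFromSmi(int32_t smi) { return smi >> kSmiTagSize; }

    // Optimistic increment: add the tagged constant 1 and report signed
    // overflow so the caller can fall back to the slow (heap-number) path,
    // like the add(..., SetCC) / branch sequence above.
    bool SmiIncrement(int32_t smi, int32_t* result) {
      int64_t wide = static_cast<int64_t>(smi) + SmiFromInt(1);
      if (wide > INT32_MAX || wide < INT32_MIN) return false;
      *result = static_cast<int32_t>(wide);
      return true;
    }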
- if (is_postfix) frame_->EmitPop(r0); + if (is_postfix) frame_->Pop(); ASSERT_EQ(original_height + 1, frame_->height()); } @@ -5387,26 +5428,30 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { class DeferredReferenceGetNamedValue: public DeferredCode { public: - explicit DeferredReferenceGetNamedValue(Handle name) : name_(name) { + explicit DeferredReferenceGetNamedValue(Register receiver, + Handle name) + : receiver_(receiver), name_(name) { set_comment("[ DeferredReferenceGetNamedValue"); } virtual void Generate(); private: + Register receiver_; Handle name_; }; void DeferredReferenceGetNamedValue::Generate() { + ASSERT(receiver_.is(r0) || receiver_.is(r1)); + Register scratch1 = VirtualFrame::scratch0(); Register scratch2 = VirtualFrame::scratch1(); __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2); __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2); - // Setup the registers and call load IC. - // On entry to this deferred code, r0 is assumed to already contain the - // receiver from the top of the stack. + // Ensure receiver in r0 and name in r2 to match load ic calling convention. + __ Move(r0, receiver_); __ mov(r2, Operand(name_)); // The rest of the instructions in the deferred code must be together. @@ -5427,20 +5472,34 @@ void DeferredReferenceGetNamedValue::Generate() { class DeferredReferenceGetKeyedValue: public DeferredCode { public: - DeferredReferenceGetKeyedValue() { + DeferredReferenceGetKeyedValue(Register key, Register receiver) + : key_(key), receiver_(receiver) { set_comment("[ DeferredReferenceGetKeyedValue"); } virtual void Generate(); + + private: + Register key_; + Register receiver_; }; void DeferredReferenceGetKeyedValue::Generate() { + ASSERT((key_.is(r0) && receiver_.is(r1)) || + (key_.is(r1) && receiver_.is(r0))); + Register scratch1 = VirtualFrame::scratch0(); Register scratch2 = VirtualFrame::scratch1(); __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2); __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2); + // Ensure key in r0 and receiver in r1 to match keyed load ic calling + // convention. + if (key_.is(r1)) { + __ Swap(r0, r1, ip); + } + // The rest of the instructions in the deferred code must be together. { Assembler::BlockConstPoolScope block_const_pool(masm_); // Call keyed load IC. It has the arguments key and receiver in r0 and r1. @@ -5460,11 +5519,19 @@ void DeferredReferenceGetKeyedValue::Generate() { class DeferredReferenceSetKeyedValue: public DeferredCode { public: - DeferredReferenceSetKeyedValue() { + DeferredReferenceSetKeyedValue(Register value, + Register key, + Register receiver) + : value_(value), key_(key), receiver_(receiver) { set_comment("[ DeferredReferenceSetKeyedValue"); } virtual void Generate(); + + private: + Register value_; + Register key_; + Register receiver_; }; @@ -5475,10 +5542,17 @@ void DeferredReferenceSetKeyedValue::Generate() { __ IncrementCounter( &Counters::keyed_store_inline_miss, 1, scratch1, scratch2); + // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic + // calling convention. + if (value_.is(r1)) { + __ Swap(r0, r1, ip); + } + ASSERT(receiver_.is(r2)); + // The rest of the instructions in the deferred code must be together. { Assembler::BlockConstPoolScope block_const_pool(masm_); - // Call keyed load IC. It has receiver amd key on the stack and the value to - // store in r0. + // Call keyed store IC. 
It has the arguments value, key and receiver in r0, + // r1 and r2. Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); // The call must be followed by a nop instruction to indicate that the @@ -5516,10 +5590,11 @@ void CodeGenerator::EmitNamedLoad(Handle name, bool is_contextual) { // this code // Load the receiver from the stack. - frame_->SpillAllButCopyTOSToR0(); + Register receiver = frame_->PopToRegister(); + VirtualFrame::SpilledScope spilled(frame_); DeferredReferenceGetNamedValue* deferred = - new DeferredReferenceGetNamedValue(name); + new DeferredReferenceGetNamedValue(receiver, name); #ifdef DEBUG int kInlinedNamedLoadInstructions = 7; @@ -5529,19 +5604,19 @@ void CodeGenerator::EmitNamedLoad(Handle name, bool is_contextual) { { Assembler::BlockConstPoolScope block_const_pool(masm_); // Check that the receiver is a heap object. - __ tst(r0, Operand(kSmiTagMask)); + __ tst(receiver, Operand(kSmiTagMask)); deferred->Branch(eq); // Check the map. The null map used below is patched by the inline cache // code. - __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset)); __ mov(r3, Operand(Factory::null_value())); __ cmp(r2, r3); deferred->Branch(ne); // Initially use an invalid index. The index will be patched by the // inline cache code. - __ ldr(r0, MemOperand(r0, 0)); + __ ldr(r0, MemOperand(receiver, 0)); // Make sure that the expected number of instructions are generated. ASSERT_EQ(kInlinedNamedLoadInstructions, @@ -5576,15 +5651,14 @@ void CodeGenerator::EmitKeyedLoad() { __ IncrementCounter(&Counters::keyed_load_inline, 1, frame_->scratch0(), frame_->scratch1()); - // Load the key and receiver from the stack to r0 and r1. - frame_->PopToR1R0(); - Register receiver = r0; - Register key = r1; + // Load the key and receiver from the stack. + Register key = frame_->PopToRegister(); + Register receiver = frame_->PopToRegister(key); VirtualFrame::SpilledScope spilled(frame_); - // The deferred code expects key and receiver in r0 and r1. + // The deferred code expects key and receiver in registers. DeferredReferenceGetKeyedValue* deferred = - new DeferredReferenceGetKeyedValue(); + new DeferredReferenceGetKeyedValue(key, receiver); // Check that the receiver is a heap object. __ tst(receiver, Operand(kSmiTagMask)); @@ -5594,17 +5668,16 @@ void CodeGenerator::EmitKeyedLoad() { // property code which can be patched. Therefore the exact number of // instructions generated need to be fixed, so the constant pool is blocked // while generating this code. -#ifdef DEBUG - int kInlinedKeyedLoadInstructions = 19; - Label check_inlined_codesize; - masm_->bind(&check_inlined_codesize); -#endif { Assembler::BlockConstPoolScope block_const_pool(masm_); Register scratch1 = VirtualFrame::scratch0(); Register scratch2 = VirtualFrame::scratch1(); // Check the map. The null map used below is patched by the inline cache // code. 
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); +#ifdef DEBUG + Label check_inlined_codesize; + masm_->bind(&check_inlined_codesize); +#endif __ mov(scratch2, Operand(Factory::null_value())); __ cmp(scratch1, scratch2); deferred->Branch(ne); @@ -5632,17 +5705,15 @@ void CodeGenerator::EmitKeyedLoad() { __ add(scratch1, scratch1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ ldr(r0, + __ ldr(scratch1, MemOperand(scratch1, key, LSL, kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize))); - __ cmp(r0, scratch2); - // This is the only branch to deferred where r0 and r1 do not contain the - // receiver and key. We can't just load undefined here because we have to - // check the prototype. + __ cmp(scratch1, scratch2); deferred->Branch(eq); + __ mov(r0, scratch1); // Make sure that the expected number of instructions are generated. - ASSERT_EQ(kInlinedKeyedLoadInstructions, + ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatch, masm_->InstructionsGeneratedSince(&check_inlined_codesize)); } @@ -5652,78 +5723,86 @@ void CodeGenerator::EmitKeyedLoad() { void CodeGenerator::EmitKeyedStore(StaticType* key_type) { - VirtualFrame::SpilledScope scope(frame_); // Generate inlined version of the keyed store if the code is in a loop // and the key is likely to be a smi. if (loop_nesting() > 0 && key_type->IsLikelySmi()) { // Inline the keyed store. Comment cmnt(masm_, "[ Inlined store to keyed property"); - DeferredReferenceSetKeyedValue* deferred = - new DeferredReferenceSetKeyedValue(); + Register scratch1 = VirtualFrame::scratch0(); + Register scratch2 = VirtualFrame::scratch1(); + Register scratch3 = r3; // Counter will be decremented in the deferred code. Placed here to avoid // having it in the instruction stream below where patching will occur. __ IncrementCounter(&Counters::keyed_store_inline, 1, - frame_->scratch0(), frame_->scratch1()); + scratch1, scratch2); + + // Load the value, key and receiver from the stack. + Register value = frame_->PopToRegister(); + Register key = frame_->PopToRegister(value); + Register receiver = r2; + frame_->EmitPop(receiver); + VirtualFrame::SpilledScope spilled(frame_); + + // The deferred code expects value, key and receiver in registers. + DeferredReferenceSetKeyedValue* deferred = + new DeferredReferenceSetKeyedValue(value, key, receiver); // Check that the value is a smi. As this inlined code does not set the // write barrier it is only possible to store smi values. - __ tst(r0, Operand(kSmiTagMask)); + __ tst(value, Operand(kSmiTagMask)); deferred->Branch(ne); - // Load the key and receiver from the stack. - __ ldr(r1, MemOperand(sp, 0)); - __ ldr(r2, MemOperand(sp, kPointerSize)); - // Check that the key is a smi. - __ tst(r1, Operand(kSmiTagMask)); + __ tst(key, Operand(kSmiTagMask)); deferred->Branch(ne); // Check that the receiver is a heap object. - __ tst(r2, Operand(kSmiTagMask)); + __ tst(receiver, Operand(kSmiTagMask)); deferred->Branch(eq); // Check that the receiver is a JSArray. - __ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE); + __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE); deferred->Branch(ne); // Check that the key is within bounds. Both the key and the length of // the JSArray are smis. Use unsigned comparison to handle negative keys. - __ ldr(r3, FieldMemOperand(r2, JSArray::kLengthOffset)); - __ cmp(r3, r1); + __ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ cmp(scratch1, key); deferred->Branch(ls); // Unsigned less equal. 
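The bounds check just above compares the smi key against the smi array length with a single unsigned comparison ("Unsigned less equal"), which also rejects negative keys: a negative smi reinterpreted as unsigned is larger than any valid length. A minimal sketch of the same reasoning in C++, using invented helper names:

    // Illustration only. Both operands keep their smi tags; since the tag is
    // the same low bit on both, unsigned ordering of the tagged values
    // matches the ordering of the untagged payloads.
    #include <cstdint>

    bool SmiKeyInBounds(int32_t key_smi, int32_t length_smi) {
      uint32_t key = static_cast<uint32_t>(key_smi);
      uint32_t length = static_cast<uint32_t>(length_smi);
      // A negative key has its sign bit set, so as an unsigned value it is
      // >= 0x80000000 and can never be below a valid (non-negative) length.
      return key < length;
    }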
// The following instructions are the part of the inlined store keyed // property code which can be patched. Therefore the exact number of // instructions generated need to be fixed, so the constant pool is blocked // while generating this code. -#ifdef DEBUG - int kInlinedKeyedStoreInstructions = 7; - Label check_inlined_codesize; - masm_->bind(&check_inlined_codesize); -#endif { Assembler::BlockConstPoolScope block_const_pool(masm_); // Get the elements array from the receiver and check that it // is not a dictionary. - __ ldr(r3, FieldMemOperand(r2, JSObject::kElementsOffset)); - __ ldr(r4, FieldMemOperand(r3, JSObject::kMapOffset)); + __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset)); // Read the fixed array map from the constant pool (not from the root // array) so that the value can be patched. When debugging, we patch this // comparison to always fail so that we will hit the IC call in the // deferred code which will allow the debugger to break for fast case // stores. - __ mov(r5, Operand(Factory::fixed_array_map())); - __ cmp(r4, r5); +#ifdef DEBUG + Label check_inlined_codesize; + masm_->bind(&check_inlined_codesize); +#endif + __ mov(scratch3, Operand(Factory::fixed_array_map())); + __ cmp(scratch2, scratch3); deferred->Branch(ne); // Store the value. - __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ str(r0, MemOperand(r3, r1, LSL, - kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize))); + __ add(scratch1, scratch1, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ str(value, + MemOperand(scratch1, key, LSL, + kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize))); // Make sure that the expected number of instructions are generated. - ASSERT_EQ(kInlinedKeyedStoreInstructions, + ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch, masm_->InstructionsGeneratedSince(&check_inlined_codesize)); } @@ -5786,19 +5865,20 @@ void Reference::GetValue() { Variable* var = expression_->AsVariableProxy()->AsVariable(); bool is_global = var != NULL; ASSERT(!is_global || var->is_global()); + if (persist_after_get_) { + cgen_->frame()->Dup(); + } cgen_->EmitNamedLoad(GetName(), is_global); cgen_->frame()->EmitPush(r0); - if (!persist_after_get_) { - cgen_->UnloadReference(this); - } + if (!persist_after_get_) set_unloaded(); break; } case KEYED: { + ASSERT(property != NULL); if (persist_after_get_) { cgen_->frame()->Dup2(); } - ASSERT(property != NULL); cgen_->EmitKeyedLoad(); cgen_->frame()->EmitPush(r0); if (!persist_after_get_) set_unloaded(); @@ -5839,16 +5919,13 @@ void Reference::SetValue(InitState init_state) { } case KEYED: { - VirtualFrame::SpilledScope scope(frame); Comment cmnt(masm, "[ Store to keyed Property"); Property* property = expression_->AsProperty(); ASSERT(property != NULL); cgen_->CodeForSourcePosition(property->position()); - - frame->EmitPop(r0); // Value. cgen_->EmitKeyedStore(property->key()->type()); frame->EmitPush(r0); - cgen_->UnloadReference(this); + set_unloaded(); break; } @@ -8486,9 +8563,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Just jump directly to runtime if native RegExp is not selected at compile // time or if regexp entry in generated code is turned off runtime switch or // at compilation. 
-#ifndef V8_NATIVE_REGEXP +#ifdef V8_INTERPRETED_REGEXP __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); -#else // V8_NATIVE_REGEXP +#else // V8_INTERPRETED_REGEXP if (!FLAG_regexp_entry_native) { __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); return; @@ -8598,7 +8675,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ ldr(last_match_info_elements, FieldMemOperand(r0, JSArray::kElementsOffset)); __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); - __ LoadRoot(ip, kFixedArrayMapRootIndex); + __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); __ cmp(r0, ip); __ b(ne, &runtime); // Check that the last match info has space for the capture registers and the @@ -8821,7 +8898,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Do the runtime call to execute the regexp. __ bind(&runtime); __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); -#endif // V8_NATIVE_REGEXP +#endif // V8_INTERPRETED_REGEXP } @@ -9967,3 +10044,5 @@ void StringAddStub::Generate(MacroAssembler* masm) { #undef __ } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index bb76b633bd..361ea131de 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -29,6 +29,7 @@ #define V8_ARM_CODEGEN_ARM_H_ #include "ic-inl.h" +#include "ast.h" namespace v8 { namespace internal { @@ -36,6 +37,7 @@ namespace internal { // Forward declarations class CompilationInfo; class DeferredCode; +class JumpTarget; class RegisterAllocator; class RegisterFile; @@ -217,6 +219,10 @@ class CodeGenerator: public AstVisitor { // expected arguments. Otherwise return -1. static int InlineRuntimeCallArgumentsCount(Handle name); + // Constants related to patching of inlined load/store. + static const int kInlinedKeyedLoadInstructionsAfterPatch = 19; + static const int kInlinedKeyedStoreInstructionsAfterPatch = 5; + private: // Construction/Destruction explicit CodeGenerator(MacroAssembler* masm); @@ -309,6 +315,7 @@ class CodeGenerator: public AstVisitor { // Read a value from a slot and leave it on top of the expression stack. void LoadFromSlot(Slot* slot, TypeofState typeof_state); void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state); + // Store the value on top of the stack to a slot. void StoreToSlot(Slot* slot, InitState init_state); @@ -338,6 +345,15 @@ class CodeGenerator: public AstVisitor { TypeofState typeof_state, JumpTarget* slow); + // Support for loading from local/global variables and arguments + // whose location is known unless they are shadowed by + // eval-introduced bindings. Generates no code for unsupported slot + // types and therefore expects to fall through to the slow jump target. + void EmitDynamicLoadFromSlotFastCase(Slot* slot, + TypeofState typeof_state, + JumpTarget* slow, + JumpTarget* done); + // Special code for typeof expressions: Unfortunately, we must // be careful when loading the expression in 'typeof' // expressions. 
We are not allowed to throw reference errors for diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc index 2e371207e0..4e186d1382 100644 --- a/deps/v8/src/arm/constants-arm.cc +++ b/deps/v8/src/arm/constants-arm.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "constants-arm.h" @@ -128,3 +130,5 @@ int Registers::Number(const char* name) { } } // namespace assembler::arm + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc index d50c2038aa..3d3e6ae9d4 100644 --- a/deps/v8/src/arm/cpu-arm.cc +++ b/deps/v8/src/arm/cpu-arm.cc @@ -32,6 +32,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "cpu.h" #include "macro-assembler.h" @@ -136,3 +138,5 @@ void CPU::DebugBreak() { } } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index d02ba764f8..69fc504e7f 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "codegen-inl.h" #include "debug.h" @@ -170,10 +172,11 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { // ---------- S t a t e -------------- + // -- r0 : value + // -- r1 : key + // -- r2 : receiver // -- lr : return address - // -- sp[0] : key - // -- sp[4] : receiver - Generate_DebugBreakCallHelper(masm, 0); + Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit()); } @@ -237,3 +240,5 @@ const int Debug::kFrameDropperFrameSize = -1; #endif // ENABLE_DEBUGGER_SUPPORT } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 4051096fca..0ac7d19f66 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -56,6 +56,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "constants-arm.h" #include "disasm.h" #include "macro-assembler.h" @@ -1356,3 +1358,5 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) { } // namespace disasm + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/fast-codegen-arm.cc b/deps/v8/src/arm/fast-codegen-arm.cc index 5dedc29ab9..48eaf46aaf 100644 --- a/deps/v8/src/arm/fast-codegen-arm.cc +++ b/deps/v8/src/arm/fast-codegen-arm.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "codegen-inl.h" #include "fast-codegen.h" #include "scopes.h" @@ -236,3 +238,5 @@ void FastCodeGenerator::Generate(CompilationInfo* compilation_info) { } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc index 0cb7f12302..271e4a6f0a 100644 --- a/deps/v8/src/arm/frames-arm.cc +++ b/deps/v8/src/arm/frames-arm.cc @@ -27,12 +27,10 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "frames-inl.h" -#ifdef V8_ARM_VARIANT_THUMB -#include "arm/assembler-thumb2-inl.h" -#else #include "arm/assembler-arm-inl.h" -#endif namespace v8 { @@ -121,3 +119,5 @@ Address InternalFrame::GetCallerStackPointer() const { } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 6680af9a97..c2f6ea96bd 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include 
"codegen-inl.h" #include "compiler.h" #include "debug.h" @@ -397,10 +399,10 @@ void FullCodeGenerator::Apply(Expression::Context context, case Expression::kValue: { Label done; __ bind(materialize_true); - __ mov(result_register(), Operand(Factory::true_value())); + __ LoadRoot(result_register(), Heap::kTrueValueRootIndex); __ jmp(&done); __ bind(materialize_false); - __ mov(result_register(), Operand(Factory::false_value())); + __ LoadRoot(result_register(), Heap::kFalseValueRootIndex); __ bind(&done); switch (location_) { case kAccumulator: @@ -417,7 +419,7 @@ void FullCodeGenerator::Apply(Expression::Context context, case Expression::kValueTest: __ bind(materialize_true); - __ mov(result_register(), Operand(Factory::true_value())); + __ LoadRoot(result_register(), Heap::kTrueValueRootIndex); switch (location_) { case kAccumulator: break; @@ -430,7 +432,7 @@ void FullCodeGenerator::Apply(Expression::Context context, case Expression::kTestValue: __ bind(materialize_false); - __ mov(result_register(), Operand(Factory::false_value())); + __ LoadRoot(result_register(), Heap::kFalseValueRootIndex); switch (location_) { case kAccumulator: break; @@ -640,11 +642,11 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) { } Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + __ pop(r1); // Key. + __ pop(r2); // Receiver. __ Call(ic, RelocInfo::CODE_TARGET); - // Value in r0 is ignored (declarations are statements). Receiver - // and key on stack are discarded. - __ Drop(2); + // Value in r0 is ignored (declarations are statements). } } } @@ -661,19 +663,29 @@ void FullCodeGenerator::DeclareGlobals(Handle pairs) { } -void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { - Comment cmnt(masm_, "[ FunctionLiteral"); +void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { + UNREACHABLE(); +} + + +void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { + UNREACHABLE(); +} - // Build the shared function info and instantiate the function based - // on it. - Handle function_info = - Compiler::BuildFunctionInfo(expr, script(), this); - if (HasStackOverflow()) return; - // Create a new closure. - __ mov(r0, Operand(function_info)); - __ stm(db_w, sp, cp.bit() | r0.bit()); - __ CallRuntime(Runtime::kNewClosure, 2); +void FullCodeGenerator::EmitNewClosure(Handle info) { + // Use the fast case closure allocation code that allocates in new + // space for nested functions that don't need literals cloning. + if (scope()->is_function_scope() && info->num_literals() == 0) { + FastNewClosureStub stub; + __ mov(r0, Operand(info)); + __ push(r0); + __ CallStub(&stub); + } else { + __ mov(r0, Operand(info)); + __ stm(db_w, sp, cp.bit() | r0.bit()); + __ CallRuntime(Runtime::kNewClosure, 2); + } Apply(context_, r0); } @@ -695,13 +707,12 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var, if (var->is_global() && !var->is_this()) { Comment cmnt(masm_, "Global variable"); // Use inline caching. Variable name is passed in r2 and the global - // object on the stack. + // object (receiver) in r0. 
__ ldr(r0, CodeGenerator::GlobalObject()); - __ push(r0); __ mov(r2, Operand(var->name())); Handle ic(Builtins::builtin(Builtins::LoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT); - DropAndApply(1, context, r0); + Apply(context, r0); } else if (slot != NULL && slot->type() == Slot::LOOKUP) { Comment cmnt(masm_, "Lookup slot"); @@ -904,7 +915,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { void FullCodeGenerator::VisitAssignment(Assignment* expr) { Comment cmnt(masm_, "[ Assignment"); - ASSERT(expr->op() != Token::INIT_CONST); + // Invalid left-hand sides are rewritten to have a 'throw ReferenceError' + // on the left-hand side. + if (!expr->target()->IsValidLeftHandSide()) { + VisitForEffect(expr->target()); + return; + } + // Left-hand side can only be a property, a global or a (parameter or local) // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY. enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; @@ -984,6 +1001,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { switch (assign_type) { case VARIABLE: EmitVariableAssignment(expr->target()->AsVariableProxy()->var(), + expr->op(), context_); break; case NAMED_PROPERTY: @@ -1000,7 +1018,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Literal* key = prop->key()->AsLiteral(); __ mov(r2, Operand(key->handle())); - __ ldr(r0, MemOperand(sp, 0)); + // Call load IC. It has arguments receiver and property name r0 and r2. Handle ic(Builtins::builtin(Builtins::LoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); } @@ -1024,14 +1042,13 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op, void FullCodeGenerator::EmitVariableAssignment(Variable* var, + Token::Value op, Expression::Context context) { - // Three main cases: global variables, lookup slots, and all other - // types of slots. Left-hand-side parameters that rewrite to - // explicit property accesses do not reach here. + // Left-hand sides that rewrite to explicit property accesses do not reach + // here. ASSERT(var != NULL); ASSERT(var->is_global() || var->slot() != NULL); - Slot* slot = var->slot(); if (var->is_global()) { ASSERT(!var->is_this()); // Assignment to a global variable. Use inline caching for the @@ -1042,43 +1059,61 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); - } else if (slot != NULL && slot->type() == Slot::LOOKUP) { - __ push(result_register()); // Value. - __ mov(r1, Operand(var->name())); - __ stm(db_w, sp, cp.bit() | r1.bit()); // Context and name. - __ CallRuntime(Runtime::kStoreContextSlot, 3); - - } else if (var->slot() != NULL) { + } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) { + // Perform the assignment for non-const variables and for initialization + // of const variables. Const assignments are simply skipped. + Label done; Slot* slot = var->slot(); switch (slot->type()) { - case Slot::LOCAL: case Slot::PARAMETER: + case Slot::LOCAL: + if (op == Token::INIT_CONST) { + // Detect const reinitialization by checking for the hole value. + __ ldr(r1, MemOperand(fp, SlotOffset(slot))); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r1, ip); + __ b(ne, &done); + } + // Perform the assignment. 
__ str(result_register(), MemOperand(fp, SlotOffset(slot))); break; case Slot::CONTEXT: { MemOperand target = EmitSlotSearch(slot, r1); + if (op == Token::INIT_CONST) { + // Detect const reinitialization by checking for the hole value. + __ ldr(r1, target); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r1, ip); + __ b(ne, &done); + } + // Perform the assignment and issue the write barrier. __ str(result_register(), target); - // RecordWrite may destroy all its register arguments. __ mov(r3, result_register()); int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; - __ mov(r2, Operand(offset)); __ RecordWrite(r1, r2, r3); break; } case Slot::LOOKUP: - UNREACHABLE(); + // Call the runtime for the assignment. The runtime will ignore + // const reinitialization. + __ push(r0); // Value. + __ mov(r0, Operand(slot->var()->name())); + __ Push(cp, r0); // Context and name. + if (op == Token::INIT_CONST) { + // The runtime will ignore const redeclaration. + __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); + } else { + __ CallRuntime(Runtime::kStoreContextSlot, 3); + } break; } - - } else { - // Variables rewritten as properties are not treated as variables in - // assignments. - UNREACHABLE(); + __ bind(&done); } + Apply(context, result_register()); } @@ -1103,6 +1138,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { // Record source code position before IC call. SetSourcePosition(expr->position()); __ mov(r2, Operand(prop->key()->AsLiteral()->handle())); + // Load receiver to r1. Leave a copy in the stack if needed for turning the + // receiver into fast case. if (expr->ends_initialization_block()) { __ ldr(r1, MemOperand(sp)); } else { @@ -1115,7 +1152,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { // If the assignment ends an initialization block, revert to fast case. if (expr->ends_initialization_block()) { __ push(r0); // Result of assignment, saved even if not needed. - __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value. + // Receiver is under the result value. + __ ldr(ip, MemOperand(sp, kPointerSize)); __ push(ip); __ CallRuntime(Runtime::kToFastProperties, 1); __ pop(r0); @@ -1143,21 +1181,30 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { // Record source code position before IC call. SetSourcePosition(expr->position()); + __ pop(r1); // Key. + // Load receiver to r2. Leave a copy in the stack if needed for turning the + // receiver into fast case. + if (expr->ends_initialization_block()) { + __ ldr(r2, MemOperand(sp)); + } else { + __ pop(r2); + } + Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); // If the assignment ends an initialization block, revert to fast case. if (expr->ends_initialization_block()) { __ push(r0); // Result of assignment, saved even if not needed. - // Receiver is under the key and value. - __ ldr(ip, MemOperand(sp, 2 * kPointerSize)); + // Receiver is under the result value. + __ ldr(ip, MemOperand(sp, kPointerSize)); __ push(ip); __ CallRuntime(Runtime::kToFastProperties, 1); __ pop(r0); + DropAndApply(1, context_, r0); + } else { + Apply(context_, r0); } - - // Receiver and key are still on stack. - DropAndApply(2, context_, r0); } @@ -1165,14 +1212,12 @@ void FullCodeGenerator::VisitProperty(Property* expr) { Comment cmnt(masm_, "[ Property"); Expression* key = expr->key(); - // Evaluate receiver. 
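The hole-value checks added to EmitVariableAssignment above implement the rule that a const slot is written exactly once: the slot starts out holding the hole sentinel, an INIT_CONST assignment stores only if the hole is still there, and later plain assignments to the const are skipped. A compact C++ model of that rule (names and types are stand-ins, not V8's):

    // Stand-in sentinel playing the role of the hole value.
    static const int kTheHoleMarker = 0;
    static const void* const kTheHole = &kTheHoleMarker;

    struct Slot { const void* value; };

    // INIT_CONST: only the first (initializing) store takes effect.
    void InitConst(Slot* slot, const void* rhs) {
      if (slot->value == kTheHole) slot->value = rhs;
    }

    // A later assignment to a const-declared variable is simply ignored.
    void AssignToConst(Slot* /* slot */, const void* /* rhs */) {}

In the generated code the role of the sentinel is played by the root loaded via Heap::kTheHoleValueRootIndex, as in the ldr/LoadRoot/cmp sequence above.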
- VisitForValue(expr->obj(), kStack); - if (key->IsPropertyName()) { + VisitForValue(expr->obj(), kAccumulator); EmitNamedPropertyLoad(expr); - // Drop receiver left on the stack by IC. - DropAndApply(1, context_, r0); + Apply(context_, r0); } else { + VisitForValue(expr->obj(), kStack); VisitForValue(expr->key(), kAccumulator); __ pop(r1); EmitKeyedPropertyLoad(expr); @@ -1445,13 +1490,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { proxy->var()->is_global()) { Comment cmnt(masm_, "Global variable"); __ ldr(r0, CodeGenerator::GlobalObject()); - __ push(r0); __ mov(r2, Operand(proxy->name())); Handle ic(Builtins::builtin(Builtins::LoadIC_Initialize)); // Use a regular load, not a contextual load, to avoid a reference // error. __ Call(ic, RelocInfo::CODE_TARGET); - __ str(r0, MemOperand(sp)); + __ push(r0); } else if (proxy != NULL && proxy->var()->slot() != NULL && proxy->var()->slot()->type() == Slot::LOOKUP) { @@ -1557,10 +1601,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { __ mov(ip, Operand(Smi::FromInt(0))); __ push(ip); } - VisitForValue(prop->obj(), kStack); if (assign_type == NAMED_PROPERTY) { + // Put the object both on the stack and in the accumulator. + VisitForValue(prop->obj(), kAccumulator); + __ push(r0); EmitNamedPropertyLoad(prop); } else { + VisitForValue(prop->obj(), kStack); VisitForValue(prop->key(), kAccumulator); __ ldr(r1, MemOperand(sp, 0)); __ push(r0); @@ -1631,6 +1678,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { case VARIABLE: if (expr->is_postfix()) { EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(), + Token::ASSIGN, Expression::kEffect); // For all contexts except kEffect: We have the result on // top of the stack. @@ -1639,6 +1687,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { } } else { EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(), + Token::ASSIGN, context_); } break; @@ -1657,15 +1706,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { break; } case KEYED_PROPERTY: { + __ pop(r1); // Key. + __ pop(r2); // Receiver. Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); if (expr->is_postfix()) { - __ Drop(2); // Result is on the stack under the key and the receiver. if (context_ != Expression::kEffect) { ApplyTOS(context_); } } else { - DropAndApply(2, context_, r0); + Apply(context_, r0); } break; } @@ -1877,3 +1927,5 @@ void FullCodeGenerator::ExitFinallyBlock() { #undef __ } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 34ba5e5f78..ba318fd2ec 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -27,7 +27,10 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "assembler-arm.h" +#include "codegen.h" #include "codegen-inl.h" #include "disasm.h" #include "ic-inl.h" @@ -639,7 +642,9 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) { // Patch the map check. Address ldr_map_instr_address = - inline_end_address - 18 * Assembler::kInstrSize; + inline_end_address - + (CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatch * + Assembler::kInstrSize); Assembler::set_target_address_at(ldr_map_instr_address, reinterpret_cast
    (map)); return true; @@ -669,7 +674,9 @@ bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) { // Patch the map check. Address ldr_map_instr_address = - inline_end_address - 5 * Assembler::kInstrSize; + inline_end_address - + (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch * + Assembler::kInstrSize); Assembler::set_target_address_at(ldr_map_instr_address, reinterpret_cast
    (map)); return true; @@ -1204,13 +1211,13 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- r0 : value + // -- r1 : key + // -- r2 : receiver // -- lr : return address - // -- sp[0] : key - // -- sp[1] : receiver // ----------------------------------- - __ ldm(ia, sp, r2.bit() | r3.bit()); - __ Push(r3, r2, r0); + // Push receiver, key and value for runtime call. + __ Push(r2, r1, r0); ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss)); __ TailCallExternalReference(ref, 3, 1); @@ -1220,12 +1227,13 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- r0 : value + // -- r1 : key + // -- r2 : receiver // -- lr : return address - // -- sp[0] : key - // -- sp[1] : receiver // ----------------------------------- - __ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object - __ Push(r3, r1, r0); + + // Push receiver, key and value for runtime call. + __ Push(r2, r1, r0); __ TailCallRuntime(Runtime::kSetProperty, 3, 1); } @@ -1234,147 +1242,135 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) { void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- r0 : value + // -- r1 : key + // -- r2 : receiver // -- lr : return address - // -- sp[0] : key - // -- sp[1] : receiver // ----------------------------------- - Label slow, fast, array, extra, exit, check_pixel_array; + Label slow, fast, array, extra, check_pixel_array; + + // Register usage. + Register value = r0; + Register key = r1; + Register receiver = r2; + Register elements = r3; // Elements array of the receiver. + // r4 and r5 are used as general scratch registers. - // Get the key and the object from the stack. - __ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver // Check that the key is a smi. - __ tst(r1, Operand(kSmiTagMask)); + __ tst(key, Operand(kSmiTagMask)); __ b(ne, &slow); // Check that the object isn't a smi. - __ tst(r3, Operand(kSmiTagMask)); + __ tst(receiver, Operand(kSmiTagMask)); __ b(eq, &slow); // Get the map of the object. - __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset)); // Check that the receiver does not require access checks. We need // to do this because this generic stub does not perform map checks. - __ ldrb(ip, FieldMemOperand(r2, Map::kBitFieldOffset)); + __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset)); __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); __ b(ne, &slow); // Check if the object is a JS array or not. - __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset)); - __ cmp(r2, Operand(JS_ARRAY_TYPE)); - // r1 == key. + __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); + __ cmp(r4, Operand(JS_ARRAY_TYPE)); __ b(eq, &array); // Check that the object is some kind of JS object. - __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); + __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); __ b(lt, &slow); - // Object case: Check key against length in the elements array. - __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset)); + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); // Check that the object is in fast mode (not dictionary). 
- __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); - __ cmp(r2, ip); + __ cmp(r4, ip); __ b(ne, &check_pixel_array); // Untag the key (for checking against untagged length in the fixed array). - __ mov(r1, Operand(r1, ASR, kSmiTagSize)); + __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Compute address to store into and check array bounds. - __ add(r2, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2)); - __ ldr(ip, FieldMemOperand(r3, FixedArray::kLengthOffset)); - __ cmp(r1, Operand(ip)); + __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); + __ cmp(r4, Operand(ip)); __ b(lo, &fast); - - // Slow case: + // Slow case, handle jump to runtime. __ bind(&slow); + // Entry registers are intact. + // r0: value. + // r1: key. + // r2: receiver. GenerateRuntimeSetProperty(masm); // Check whether the elements is a pixel array. - // r0: value - // r1: index (as a smi), zero-extended. - // r3: elements array + // r4: elements map. __ bind(&check_pixel_array); __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); - __ cmp(r2, ip); + __ cmp(r4, ip); __ b(ne, &slow); // Check that the value is a smi. If a conversion is needed call into the // runtime to convert and clamp. - __ BranchOnNotSmi(r0, &slow); - __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the key. - __ ldr(ip, FieldMemOperand(r3, PixelArray::kLengthOffset)); - __ cmp(r1, Operand(ip)); + __ BranchOnNotSmi(value, &slow); + __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key. + __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset)); + __ cmp(r4, Operand(ip)); __ b(hs, &slow); - __ mov(r4, r0); // Save the value. - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); // Untag the value. + __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. { // Clamp the value to [0..255]. Label done; - __ tst(r0, Operand(0xFFFFFF00)); + __ tst(r5, Operand(0xFFFFFF00)); __ b(eq, &done); - __ mov(r0, Operand(0), LeaveCC, mi); // 0 if negative. - __ mov(r0, Operand(255), LeaveCC, pl); // 255 if positive. + __ mov(r5, Operand(0), LeaveCC, mi); // 0 if negative. + __ mov(r5, Operand(255), LeaveCC, pl); // 255 if positive. __ bind(&done); } - __ ldr(r2, FieldMemOperand(r3, PixelArray::kExternalPointerOffset)); - __ strb(r0, MemOperand(r2, r1)); - __ mov(r0, Operand(r4)); // Return the original value. + // Get the pointer to the external array. This clobbers elements. + __ ldr(elements, + FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); + __ strb(r5, MemOperand(elements, r4)); // Elements is now external array. __ Ret(); - // Extra capacity case: Check if there is extra capacity to // perform the store and update the length. Used for adding one // element to the array by writing to array[array.length]. - // r0 == value, r1 == key, r2 == elements, r3 == object __ bind(&extra); - __ b(ne, &slow); // do not leave holes in the array - __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // untag - __ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset)); - __ cmp(r1, Operand(ip)); + // Condition code from comparing key and array length is still available. + __ b(ne, &slow); // Only support writing to writing to array[array.length]. + // Check for room in the elements backing store. + __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag key. 
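The untagging just above, together with the kSmiTagMask tests and the Smi::FromInt(1) addition nearby, rests on the 32-bit smi encoding this code assumes (kSmiTag == 0, kSmiTagSize == 1, kSmiShiftSize == 0): the integer payload sits in the upper 31 bits and the low bit is the tag. A small C++ restatement of that encoding (illustrative only):

    #include <cstdint>

    // 32-bit smi encoding: payload in bits 31..1, tag in bit 0 (always zero).
    inline int32_t SmiFromInt(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    }
    inline int32_t SmiToInt(int32_t smi) { return smi >> 1; }          // the 'ASR kSmiTagSize' above
    inline bool    IsSmi(int32_t word)   { return (word & 1) == 0; }   // the kSmiTagMask test

    // Because the tag is zero, tagged values can be added directly:
    // SmiFromInt(a) + SmiFromInt(b) == SmiFromInt(a + b), which is why the
    // code below can compute key + 1 as 'add(r4, key, Operand(Smi::FromInt(1)))'.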
+ __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); + __ cmp(r4, Operand(ip)); __ b(hs, &slow); - __ mov(r1, Operand(r1, LSL, kSmiTagSize)); // restore tag - __ add(r1, r1, Operand(1 << kSmiTagSize)); // and increment - __ str(r1, FieldMemOperand(r3, JSArray::kLengthOffset)); - __ mov(r3, Operand(r2)); - // NOTE: Computing the address to store into must take the fact - // that the key has been incremented into account. - int displacement = FixedArray::kHeaderSize - kHeapObjectTag - - ((1 << kSmiTagSize) * 2); - __ add(r2, r2, Operand(displacement)); - __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); + // Calculate key + 1 as smi. + ASSERT_EQ(0, kSmiTag); + __ add(r4, key, Operand(Smi::FromInt(1))); + __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); __ b(&fast); - // Array case: Get the length and the elements array from the JS // array. Check that the array is in fast mode; if it is the // length is always a smi. - // r0 == value, r3 == object __ bind(&array); - __ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset)); - __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset)); + __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); - __ cmp(r1, ip); + __ cmp(r4, ip); __ b(ne, &slow); - // Check the key against the length in the array, compute the - // address to store into and fall through to fast case. - __ ldr(r1, MemOperand(sp)); // restore key - // r0 == value, r1 == key, r2 == elements, r3 == object. - __ ldr(ip, FieldMemOperand(r3, JSArray::kLengthOffset)); - __ cmp(r1, Operand(ip)); + // Check the key against the length in the array. + __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ cmp(key, Operand(ip)); __ b(hs, &extra); - __ mov(r3, Operand(r2)); - __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); - + // Fall through to fast case. - // Fast case: Do the store. - // r0 == value, r2 == address to store into, r3 == elements __ bind(&fast); - __ str(r0, MemOperand(r2)); + // Fast case, store the value to the elements backing store. + __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(value, MemOperand(r5)); // Skip write barrier if the written value is a smi. - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, &exit); + __ tst(value, Operand(kSmiTagMask)); + __ Ret(eq); // Update write barrier for the elements array address. - __ sub(r1, r2, Operand(r3)); - __ RecordWrite(r3, r1, r2); + __ sub(r4, r5, Operand(elements)); + __ RecordWrite(elements, r4, r5); - __ bind(&exit); __ Ret(); } @@ -1468,20 +1464,23 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, ExternalArrayType array_type) { // ---------- S t a t e -------------- // -- r0 : value + // -- r1 : key + // -- r2 : receiver // -- lr : return address - // -- sp[0] : key - // -- sp[1] : receiver // ----------------------------------- Label slow, check_heap_number; - // Get the key and the object from the stack. - __ ldm(ia, sp, r1.bit() | r2.bit()); // r1 = key, r2 = receiver + // Register usage. + Register value = r0; + Register key = r1; + Register receiver = r2; + // r3 mostly holds the elements array or the destination external array. // Check that the object isn't a smi. 
- __ BranchOnSmi(r2, &slow); + __ BranchOnSmi(receiver, &slow); - // Check that the object is a JS object. Load map into r3 - __ CompareObjectType(r2, r3, r4, FIRST_JS_OBJECT_TYPE); + // Check that the object is a JS object. Load map into r3. + __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE); __ b(le, &slow); // Check that the receiver does not require access checks. We need @@ -1491,73 +1490,70 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, __ b(ne, &slow); // Check that the key is a smi. - __ BranchOnNotSmi(r1, &slow); + __ BranchOnNotSmi(key, &slow); - // Check that the elements array is the appropriate type of - // ExternalArray. - // r0: value - // r1: index (smi) - // r2: object - __ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset)); - __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); + // Check that the elements array is the appropriate type of ExternalArray. + __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); - __ cmp(r3, ip); + __ cmp(r4, ip); __ b(ne, &slow); // Check that the index is in range. - __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the index. - __ ldr(ip, FieldMemOperand(r2, ExternalArray::kLengthOffset)); - __ cmp(r1, ip); + __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index. + __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); + __ cmp(r4, ip); // Unsigned comparison catches both negative and too-large values. __ b(hs, &slow); // Handle both smis and HeapNumbers in the fast path. Go to the // runtime for all other kinds of values. - // r0: value - // r1: index (integer) - // r2: array - __ BranchOnNotSmi(r0, &check_heap_number); - __ mov(r3, Operand(r0, ASR, kSmiTagSize)); // Untag the value. - __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset)); - - // r1: index (integer) - // r2: base pointer of external storage - // r3: value (integer) + // r3: external array. + // r4: key (integer). + __ BranchOnNotSmi(value, &check_heap_number); + __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. + __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); + + // r3: base pointer of external storage. + // r4: key (integer). + // r5: value (integer). switch (array_type) { case kExternalByteArray: case kExternalUnsignedByteArray: - __ strb(r3, MemOperand(r2, r1, LSL, 0)); + __ strb(r5, MemOperand(r3, r4, LSL, 0)); break; case kExternalShortArray: case kExternalUnsignedShortArray: - __ strh(r3, MemOperand(r2, r1, LSL, 1)); + __ strh(r5, MemOperand(r3, r4, LSL, 1)); break; case kExternalIntArray: case kExternalUnsignedIntArray: - __ str(r3, MemOperand(r2, r1, LSL, 2)); + __ str(r5, MemOperand(r3, r4, LSL, 2)); break; case kExternalFloatArray: // Need to perform int-to-float conversion. - ConvertIntToFloat(masm, r3, r4, r5, r6); - __ str(r4, MemOperand(r2, r1, LSL, 2)); + ConvertIntToFloat(masm, r5, r6, r7, r9); + __ str(r6, MemOperand(r3, r4, LSL, 2)); break; default: UNREACHABLE(); break; } - // r0: value + // Entry registers are intact, r0 holds the value which is the return value. __ Ret(); - // r0: value - // r1: index (integer) - // r2: external array object + // r3: external array. + // r4: index (integer). 
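In the external-array store switch above, the element type picks both the store width and the LSL 0/1/2 operand, which is just the index scaled by the element size. The same addressing written in portable C++ (hypothetical helper covering the byte, short and int cases only):

    #include <cstdint>

    // addr = base + (index << log2(element_size)); the store truncates the value
    // to the element width, matching strb / strh / str above.
    void StoreExternalElement(uint8_t* base, int32_t index, int32_t value,
                              int log2_element_size) {
      uint8_t* addr = base + (static_cast<intptr_t>(index) << log2_element_size);
      switch (log2_element_size) {
        case 0: *reinterpret_cast<int8_t*>(addr)  = static_cast<int8_t>(value);  break;  // strb
        case 1: *reinterpret_cast<int16_t*>(addr) = static_cast<int16_t>(value); break;  // strh
        case 2: *reinterpret_cast<int32_t*>(addr) = value;                       break;  // str
      }
    }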
__ bind(&check_heap_number); - __ CompareObjectType(r0, r3, r4, HEAP_NUMBER_TYPE); + __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE); __ b(ne, &slow); - __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset)); + __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); + + // r3: base pointer of external storage. + // r4: key (integer). // The WebGL specification leaves the behavior of storing NaN and // +/-Infinity into integer arrays basically undefined. For more @@ -1567,13 +1563,13 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, // vldr requires offset to be a multiple of 4 so we can not // include -kHeapObjectTag into it. - __ sub(r3, r0, Operand(kHeapObjectTag)); - __ vldr(d0, r3, HeapNumber::kValueOffset); + __ sub(r5, r0, Operand(kHeapObjectTag)); + __ vldr(d0, r5, HeapNumber::kValueOffset); if (array_type == kExternalFloatArray) { __ vcvt_f32_f64(s0, d0); - __ vmov(r3, s0); - __ str(r3, MemOperand(r2, r1, LSL, 2)); + __ vmov(r5, s0); + __ str(r5, MemOperand(r3, r4, LSL, 2)); } else { Label done; @@ -1582,38 +1578,38 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, __ vcmp(d0, d0); // Move vector status bits to normal status bits. __ vmrs(v8::internal::pc); - __ mov(r3, Operand(0), LeaveCC, vs); // NaN converts to 0 + __ mov(r5, Operand(0), LeaveCC, vs); // NaN converts to 0. __ b(vs, &done); - // Test whether exponent equal to 0x7FF (infinity or NaN) - __ vmov(r4, r3, d0); + // Test whether exponent equal to 0x7FF (infinity or NaN). + __ vmov(r6, r7, d0); __ mov(r5, Operand(0x7FF00000)); - __ and_(r3, r3, Operand(r5)); - __ teq(r3, Operand(r5)); - __ mov(r3, Operand(0), LeaveCC, eq); + __ and_(r6, r6, Operand(r5)); + __ teq(r6, Operand(r5)); + __ mov(r6, Operand(0), LeaveCC, eq); - // Not infinity or NaN simply convert to int + // Not infinity or NaN simply convert to int. if (IsElementTypeSigned(array_type)) { __ vcvt_s32_f64(s0, d0, ne); } else { __ vcvt_u32_f64(s0, d0, ne); } - __ vmov(r3, s0, ne); + __ vmov(r5, s0, ne); __ bind(&done); switch (array_type) { case kExternalByteArray: case kExternalUnsignedByteArray: - __ strb(r3, MemOperand(r2, r1, LSL, 0)); + __ strb(r5, MemOperand(r3, r4, LSL, 0)); break; case kExternalShortArray: case kExternalUnsignedShortArray: - __ strh(r3, MemOperand(r2, r1, LSL, 1)); + __ strh(r5, MemOperand(r3, r4, LSL, 1)); break; case kExternalIntArray: case kExternalUnsignedIntArray: - __ str(r3, MemOperand(r2, r1, LSL, 2)); + __ str(r5, MemOperand(r3, r4, LSL, 2)); break; default: UNREACHABLE(); @@ -1621,12 +1617,12 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, } } - // r0: original value + // Entry registers are intact, r0 holds the value which is the return value. __ Ret(); } else { - // VFP3 is not available do manual conversions - __ ldr(r3, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - __ ldr(r4, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); + // VFP3 is not available do manual conversions. + __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset)); + __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset)); if (array_type == kExternalFloatArray) { Label done, nan_or_infinity_or_zero; @@ -1638,106 +1634,108 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, // Test for all special exponent values: zeros, subnormal numbers, NaNs // and infinities. All these should be converted to 0. 
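The bit manipulation in the non-VFP3 path that follows is easier to read with the standard IEEE 754 layouts in mind: the high word of a double is [sign:1 | exponent:11, bias 1023 | top 20 mantissa bits], while a binary32 float is [sign:1 | exponent:8, bias 127 | mantissa:23]. Re-biasing an exponent for the float path therefore means adding (127 - 1023), which is what the kBinary32ExponentBias - HeapNumber::kExponentBias operand below computes. A tiny C++ sketch of pulling the biased exponent out of the high word (constants as used in this code, the helper itself is not V8's):

    #include <cstdint>

    // Mirrors the 'and_(..., Operand(kExponentMask))' plus shift sequence below:
    // kExponentMask == 0x7FF00000 and the exponent field starts at bit 20 of the
    // high word of the double.
    inline int BiasedExponent(uint32_t hi_word) {
      return static_cast<int>((hi_word & 0x7FF00000u) >> 20);
    }

    // An all-zero exponent (zeros, subnormals) or all-one exponent (NaN,
    // infinity) marks the special cases handled separately below.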
- __ mov(r5, Operand(HeapNumber::kExponentMask)); - __ and_(r6, r3, Operand(r5), SetCC); + __ mov(r7, Operand(HeapNumber::kExponentMask)); + __ and_(r9, r5, Operand(r7), SetCC); __ b(eq, &nan_or_infinity_or_zero); - __ teq(r6, Operand(r5)); - __ mov(r6, Operand(kBinary32ExponentMask), LeaveCC, eq); + __ teq(r9, Operand(r7)); + __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq); __ b(eq, &nan_or_infinity_or_zero); // Rebias exponent. - __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift)); - __ add(r6, - r6, + __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); + __ add(r9, + r9, Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); - __ cmp(r6, Operand(kBinary32MaxExponent)); - __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, gt); - __ orr(r3, r3, Operand(kBinary32ExponentMask), LeaveCC, gt); + __ cmp(r9, Operand(kBinary32MaxExponent)); + __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt); + __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt); __ b(gt, &done); - __ cmp(r6, Operand(kBinary32MinExponent)); - __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, lt); + __ cmp(r9, Operand(kBinary32MinExponent)); + __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt); __ b(lt, &done); - __ and_(r7, r3, Operand(HeapNumber::kSignMask)); - __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); - __ orr(r7, r7, Operand(r3, LSL, kMantissaInHiWordShift)); - __ orr(r7, r7, Operand(r4, LSR, kMantissaInLoWordShift)); - __ orr(r3, r7, Operand(r6, LSL, kBinary32ExponentShift)); + __ and_(r7, r5, Operand(HeapNumber::kSignMask)); + __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); + __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift)); + __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift)); + __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift)); __ bind(&done); - __ str(r3, MemOperand(r2, r1, LSL, 2)); + __ str(r5, MemOperand(r3, r4, LSL, 2)); + // Entry registers are intact, r0 holds the value which is the return + // value. __ Ret(); __ bind(&nan_or_infinity_or_zero); - __ and_(r7, r3, Operand(HeapNumber::kSignMask)); - __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); - __ orr(r6, r6, r7); - __ orr(r6, r6, Operand(r3, LSL, kMantissaInHiWordShift)); - __ orr(r3, r6, Operand(r4, LSR, kMantissaInLoWordShift)); + __ and_(r7, r5, Operand(HeapNumber::kSignMask)); + __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); + __ orr(r9, r9, r7); + __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift)); + __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift)); __ b(&done); } else { - bool is_signed_type = IsElementTypeSigned(array_type); + bool is_signed_type = IsElementTypeSigned(array_type); int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; - int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; + int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; Label done, sign; // Test for all special exponent values: zeros, subnormal numbers, NaNs // and infinities. All these should be converted to 0. - __ mov(r5, Operand(HeapNumber::kExponentMask)); - __ and_(r6, r3, Operand(r5), SetCC); - __ mov(r3, Operand(0), LeaveCC, eq); + __ mov(r7, Operand(HeapNumber::kExponentMask)); + __ and_(r9, r5, Operand(r7), SetCC); + __ mov(r5, Operand(0), LeaveCC, eq); __ b(eq, &done); - __ teq(r6, Operand(r5)); - __ mov(r3, Operand(0), LeaveCC, eq); + __ teq(r9, Operand(r7)); + __ mov(r5, Operand(0), LeaveCC, eq); __ b(eq, &done); // Unbias exponent. 
- __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift)); - __ sub(r6, r6, Operand(HeapNumber::kExponentBias), SetCC); + __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); + __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); // If exponent is negative than result is 0. - __ mov(r3, Operand(0), LeaveCC, mi); + __ mov(r5, Operand(0), LeaveCC, mi); __ b(mi, &done); - // If exponent is too big than result is minimal value - __ cmp(r6, Operand(meaningfull_bits - 1)); - __ mov(r3, Operand(min_value), LeaveCC, ge); + // If exponent is too big than result is minimal value. + __ cmp(r9, Operand(meaningfull_bits - 1)); + __ mov(r5, Operand(min_value), LeaveCC, ge); __ b(ge, &done); - __ and_(r5, r3, Operand(HeapNumber::kSignMask), SetCC); - __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); - __ orr(r3, r3, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); + __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); + __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); + __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); - __ rsb(r6, r6, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); - __ mov(r3, Operand(r3, LSR, r6), LeaveCC, pl); + __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); + __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); __ b(pl, &sign); - __ rsb(r6, r6, Operand(0)); - __ mov(r3, Operand(r3, LSL, r6)); - __ rsb(r6, r6, Operand(meaningfull_bits)); - __ orr(r3, r3, Operand(r4, LSR, r6)); + __ rsb(r9, r9, Operand(0)); + __ mov(r5, Operand(r5, LSL, r9)); + __ rsb(r9, r9, Operand(meaningfull_bits)); + __ orr(r5, r5, Operand(r6, LSR, r9)); __ bind(&sign); - __ teq(r5, Operand(0)); - __ rsb(r3, r3, Operand(0), LeaveCC, ne); + __ teq(r7, Operand(0)); + __ rsb(r5, r5, Operand(0), LeaveCC, ne); __ bind(&done); switch (array_type) { case kExternalByteArray: case kExternalUnsignedByteArray: - __ strb(r3, MemOperand(r2, r1, LSL, 0)); + __ strb(r5, MemOperand(r3, r4, LSL, 0)); break; case kExternalShortArray: case kExternalUnsignedShortArray: - __ strh(r3, MemOperand(r2, r1, LSL, 1)); + __ strh(r5, MemOperand(r3, r4, LSL, 1)); break; case kExternalIntArray: case kExternalUnsignedIntArray: - __ str(r3, MemOperand(r2, r1, LSL, 2)); + __ str(r5, MemOperand(r3, r4, LSL, 2)); break; default: UNREACHABLE(); @@ -1748,6 +1746,11 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, // Slow case: call runtime. __ bind(&slow); + + // Entry registers are intact. + // r0: value + // r1: key + // r2: receiver GenerateRuntimeSetProperty(masm); } @@ -1838,3 +1841,5 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) { } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/jump-target-arm.cc b/deps/v8/src/arm/jump-target-arm.cc index a13de0e2a6..3c43d168d8 100644 --- a/deps/v8/src/arm/jump-target-arm.cc +++ b/deps/v8/src/arm/jump-target-arm.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "codegen-inl.h" #include "jump-target-inl.h" #include "register-allocator-inl.h" @@ -47,28 +49,15 @@ void JumpTarget::DoJump() { // which are still live in the C++ code. ASSERT(cgen()->HasValidEntryRegisters()); - if (is_bound()) { - // Backward jump. There already a frame expectation at the target. - ASSERT(direction_ == BIDIRECTIONAL); - cgen()->frame()->MergeTo(entry_frame_); + if (entry_frame_set_) { + // There already a frame expectation at the target. 
+ cgen()->frame()->MergeTo(&entry_frame_); cgen()->DeleteFrame(); } else { - // Use the current frame as the expected one at the target if necessary. - if (entry_frame_ == NULL) { - entry_frame_ = cgen()->frame(); - RegisterFile empty; - cgen()->SetFrame(NULL, &empty); - } else { - cgen()->frame()->MergeTo(entry_frame_); - cgen()->DeleteFrame(); - } - - // The predicate is_linked() should be made true. Its implementation - // detects the presence of a frame pointer in the reaching_frames_ list. - if (!is_linked()) { - reaching_frames_.Add(NULL); - ASSERT(is_linked()); - } + // Clone the current frame to use as the expected one at the target. + set_entry_frame(cgen()->frame()); + RegisterFile empty; + cgen()->SetFrame(NULL, &empty); } __ jmp(&entry_label_); } @@ -77,23 +66,19 @@ void JumpTarget::DoJump() { void JumpTarget::DoBranch(Condition cc, Hint ignored) { ASSERT(cgen()->has_valid_frame()); - if (is_bound()) { - ASSERT(direction_ == BIDIRECTIONAL); + if (entry_frame_set_) { // Backward branch. We have an expected frame to merge to on the // backward edge. - cgen()->frame()->MergeTo(entry_frame_); - } else { - // Clone the current frame to use as the expected one at the target if - // necessary. - if (entry_frame_ == NULL) { - entry_frame_ = new VirtualFrame(cgen()->frame()); - } - // The predicate is_linked() should be made true. Its implementation - // detects the presence of a frame pointer in the reaching_frames_ list. - if (!is_linked()) { - reaching_frames_.Add(NULL); - ASSERT(is_linked()); + if (cc == al) { + cgen()->frame()->MergeTo(&entry_frame_); + } else { + // We can't do conditional merges yet so you have to ensure that all + // conditional branches to the JumpTarget have the same virtual frame. + ASSERT(cgen()->frame()->Equals(&entry_frame_)); } + } else { + // Clone the current frame to use as the expected one at the target. + set_entry_frame(cgen()->frame()); } __ b(cc, &entry_label_); } @@ -113,15 +98,10 @@ void JumpTarget::Call() { // Calls are always 'forward' so we use a copy of the current frame (plus // one for a return address) as the expected frame. - ASSERT(entry_frame_ == NULL); - VirtualFrame* target_frame = new VirtualFrame(cgen()->frame()); - target_frame->Adjust(1); - entry_frame_ = target_frame; - - // The predicate is_linked() should now be made true. Its implementation - // detects the presence of a frame pointer in the reaching_frames_ list. - reaching_frames_.Add(NULL); - ASSERT(is_linked()); + ASSERT(!entry_frame_set_); + VirtualFrame target_frame = *cgen()->frame(); + target_frame.Adjust(1); + set_entry_frame(&target_frame); __ bl(&entry_label_); } @@ -136,77 +116,27 @@ void JumpTarget::DoBind() { if (cgen()->has_valid_frame()) { // If there is a current frame we can use it on the fall through. - if (entry_frame_ == NULL) { - entry_frame_ = new VirtualFrame(cgen()->frame()); + if (!entry_frame_set_) { + entry_frame_ = *cgen()->frame(); + entry_frame_set_ = true; } else { - ASSERT(cgen()->frame()->Equals(entry_frame_)); + cgen()->frame()->MergeTo(&entry_frame_); } } else { // If there is no current frame we must have an entry frame which we can // copy. - ASSERT(entry_frame_ != NULL); + ASSERT(entry_frame_set_); RegisterFile empty; - cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty); - } - - // The predicate is_linked() should be made false. Its implementation - // detects the presence (or absence) of frame pointers in the - // reaching_frames_ list. If we inserted a bogus frame to make - // is_linked() true, remove it now. 
- if (is_linked()) { - reaching_frames_.Clear(); + cgen()->SetFrame(new VirtualFrame(&entry_frame_), &empty); } __ bind(&entry_label_); } -void BreakTarget::Jump() { - // On ARM we do not currently emit merge code for jumps, so we need to do - // it explicitly here. The only merging necessary is to drop extra - // statement state from the stack. - ASSERT(cgen()->has_valid_frame()); - int count = cgen()->frame()->height() - expected_height_; - cgen()->frame()->Drop(count); - DoJump(); -} - - -void BreakTarget::Jump(Result* arg) { - UNIMPLEMENTED(); -} - - -void BreakTarget::Bind() { -#ifdef DEBUG - // All the forward-reaching frames should have been adjusted at the - // jumps to this target. - for (int i = 0; i < reaching_frames_.length(); i++) { - ASSERT(reaching_frames_[i] == NULL || - reaching_frames_[i]->height() == expected_height_); - } -#endif - // Drop leftover statement state from the frame before merging, even - // on the fall through. This is so we can bind the return target - // with state on the frame. - if (cgen()->has_valid_frame()) { - int count = cgen()->frame()->height() - expected_height_; - // On ARM we do not currently emit merge code at binding sites, so we need - // to do it explicitly here. The only merging necessary is to drop extra - // statement state from the stack. - cgen()->frame()->Drop(count); - } - - DoBind(); -} - - -void BreakTarget::Bind(Result* arg) { - UNIMPLEMENTED(); -} - - #undef __ } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index c4b153f82e..e356d55e1b 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "bootstrapper.h" #include "codegen-inl.h" #include "debug.h" @@ -1725,3 +1727,5 @@ void CodePatcher::Emit(Address addr) { } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index 64fe5d69c1..e8910f4860 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -26,6 +26,9 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
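As with the other ARM source files touched in this patch, the hunk that follows wraps the whole implementation in a V8_TARGET_ARCH_ARM guard, presumably so each architecture-specific file compiles to an empty translation unit when V8 is built for a different target and build files can list all sources unconditionally. The repeated pattern, shown as a bare skeleton:

    #include "v8.h"

    #if defined(V8_TARGET_ARCH_ARM)

    // ... architecture-specific implementation ...

    #endif  // V8_TARGET_ARCH_ARM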
#include "v8.h" + +#if defined(V8_TARGET_ARCH_ARM) + #include "unicode.h" #include "log.h" #include "ast.h" @@ -1255,3 +1258,5 @@ void RegExpCEntryStub::Generate(MacroAssembler* masm_) { #endif // V8_INTERPRETED_REGEXP }} // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/register-allocator-arm.cc b/deps/v8/src/arm/register-allocator-arm.cc index ad0c7f9d46..3b35574da3 100644 --- a/deps/v8/src/arm/register-allocator-arm.cc +++ b/deps/v8/src/arm/register-allocator-arm.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "codegen-inl.h" #include "register-allocator-inl.h" @@ -57,3 +59,5 @@ Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() { } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index e4601f3e3f..e72a8796dc 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -29,6 +29,8 @@ #include #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "disasm.h" #include "assembler.h" #include "arm/constants-arm.h" @@ -2731,3 +2733,5 @@ uintptr_t Simulator::PopAddress() { } } // namespace assembler::arm #endif // __arm__ + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 877354ccae..8001cd842a 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "ic-inl.h" #include "codegen-inl.h" #include "stub-cache.h" @@ -506,8 +508,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED { ASSERT(callback->getter() != NULL); Label cleanup; - __ pop(scratch2); - __ Push(receiver, scratch2); + __ push(receiver); holder = stub_compiler->CheckPrototypes(holder_obj, holder, lookup->holder(), scratch1, @@ -526,9 +527,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED { __ TailCallExternalReference(ref, 5, 1); __ bind(&cleanup); - __ pop(scratch1); __ pop(scratch2); - __ push(scratch1); } } @@ -1618,15 +1617,11 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name, JSObject* object, JSObject* last) { // ----------- S t a t e ------------- - // -- r2 : name + // -- r0 : receiver // -- lr : return address - // -- [sp] : receiver // ----------------------------------- Label miss; - // Load receiver. - __ ldr(r0, MemOperand(sp, 0)); - // Check that receiver is not a smi. 
__ tst(r0, Operand(kSmiTagMask)); __ b(eq, &miss); @@ -1663,14 +1658,12 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object, int index, String* name) { // ----------- S t a t e ------------- + // -- r0 : receiver // -- r2 : name // -- lr : return address - // -- [sp] : receiver // ----------------------------------- Label miss; - __ ldr(r0, MemOperand(sp, 0)); - GenerateLoadField(object, holder, r0, r3, r1, index, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -1685,13 +1678,12 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name, JSObject* holder, AccessorInfo* callback) { // ----------- S t a t e ------------- + // -- r0 : receiver // -- r2 : name // -- lr : return address - // -- [sp] : receiver // ----------------------------------- Label miss; - __ ldr(r0, MemOperand(sp, 0)); Failure* failure = Failure::InternalError(); bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, callback, name, &miss, &failure); @@ -1710,14 +1702,12 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object, Object* value, String* name) { // ----------- S t a t e ------------- + // -- r0 : receiver // -- r2 : name // -- lr : return address - // -- [sp] : receiver // ----------------------------------- Label miss; - __ ldr(r0, MemOperand(sp, 0)); - GenerateLoadConstant(object, holder, r0, r3, r1, value, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -1731,14 +1721,12 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object, JSObject* holder, String* name) { // ----------- S t a t e ------------- + // -- r0 : receiver // -- r2 : name // -- lr : return address - // -- [sp] : receiver // ----------------------------------- Label miss; - __ ldr(r0, MemOperand(sp, 0)); - LookupResult lookup; LookupPostInterceptor(holder, name, &lookup); GenerateLoadInterceptor(object, @@ -1764,10 +1752,9 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, String* name, bool is_dont_delete) { // ----------- S t a t e ------------- + // -- r0 : receiver // -- r2 : name // -- lr : return address - // -- r0 : receiver - // -- sp[0] : receiver // ----------------------------------- Label miss; @@ -1974,32 +1961,31 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, String* name) { // ----------- S t a t e ------------- // -- r0 : value - // -- r2 : name + // -- r1 : key + // -- r2 : receiver // -- lr : return address - // -- [sp] : receiver // ----------------------------------- Label miss; - __ IncrementCounter(&Counters::keyed_store_field, 1, r1, r3); + __ IncrementCounter(&Counters::keyed_store_field, 1, r3, r4); // Check that the name has not changed. - __ cmp(r2, Operand(Handle(name))); + __ cmp(r1, Operand(Handle(name))); __ b(ne, &miss); - // Load receiver from the stack. - __ ldr(r3, MemOperand(sp)); - // r1 is used as scratch register, r3 and r2 might be clobbered. + // r3 is used as scratch register. r1 and r2 keep their values if a jump to + // the miss label is generated. GenerateStoreField(masm(), object, index, transition, - r3, r2, r1, + r2, r1, r3, &miss); __ bind(&miss); - __ DecrementCounter(&Counters::keyed_store_field, 1, r1, r3); - __ mov(r2, Operand(Handle(name))); // restore name register. + __ DecrementCounter(&Counters::keyed_store_field, 1, r3, r4); Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss)); + __ Jump(ic, RelocInfo::CODE_TARGET); // Return the generated code. 
@@ -2153,3 +2139,5 @@ Object* ConstructStubCompiler::CompileConstructStub( #undef __ } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/virtual-frame-arm-inl.h b/deps/v8/src/arm/virtual-frame-arm-inl.h new file mode 100644 index 0000000000..a97cde4f75 --- /dev/null +++ b/deps/v8/src/arm/virtual-frame-arm-inl.h @@ -0,0 +1,53 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_VIRTUAL_FRAME_ARM_INL_H_ +#define V8_VIRTUAL_FRAME_ARM_INL_H_ + +#include "assembler-arm.h" +#include "virtual-frame-arm.h" + +namespace v8 { +namespace internal { + +// These VirtualFrame methods should actually be in a virtual-frame-arm-inl.h +// file if such a thing existed. +MemOperand VirtualFrame::ParameterAt(int index) { + // Index -1 corresponds to the receiver. + ASSERT(-1 <= index); // -1 is the receiver. + ASSERT(index <= parameter_count()); + return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize); +} + + // The receiver frame slot. 
+MemOperand VirtualFrame::Receiver() { + return ParameterAt(-1); +} + +} } // namespace v8::internal + +#endif // V8_VIRTUAL_FRAME_ARM_INL_H_ diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc index 0ec6e203d2..3acd2df478 100644 --- a/deps/v8/src/arm/virtual-frame-arm.cc +++ b/deps/v8/src/arm/virtual-frame-arm.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_ARM) + #include "codegen-inl.h" #include "register-allocator-inl.h" #include "scopes.h" @@ -72,8 +74,15 @@ void VirtualFrame::PopToR0() { void VirtualFrame::MergeTo(VirtualFrame* expected) { if (Equals(expected)) return; + MergeTOSTo(expected->top_of_stack_state_); + ASSERT(register_allocation_map_ == expected->register_allocation_map_); +} + + +void VirtualFrame::MergeTOSTo( + VirtualFrame::TopOfStack expected_top_of_stack_state) { #define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b)) - switch (CASE_NUMBER(top_of_stack_state_, expected->top_of_stack_state_)) { + switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) { case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS): break; case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS): @@ -154,7 +163,7 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) { UNREACHABLE(); #undef CASE_NUMBER } - ASSERT(register_allocation_map_ == expected->register_allocation_map_); + top_of_stack_state_ = expected_top_of_stack_state; } @@ -300,7 +309,8 @@ void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id, void VirtualFrame::CallLoadIC(Handle name, RelocInfo::Mode mode) { Handle ic(Builtins::builtin(Builtins::LoadIC_Initialize)); - SpillAllButCopyTOSToR0(); + PopToR0(); + SpillAll(); __ mov(r2, Operand(name)); CallCodeObject(ic, mode, 0); } @@ -330,8 +340,10 @@ void VirtualFrame::CallKeyedLoadIC() { void VirtualFrame::CallKeyedStoreIC() { - ASSERT(SpilledScope::is_spilled()); Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + PopToR1R0(); + SpillAll(); + EmitPop(r2); CallCodeObject(ic, RelocInfo::CODE_TARGET, 0); } @@ -418,7 +430,7 @@ void VirtualFrame::Pop() { void VirtualFrame::EmitPop(Register reg) { - ASSERT(!is_used(reg)); + ASSERT(!is_used(RegisterAllocator::ToNumber(reg))); if (top_of_stack_state_ == NO_TOS_REGISTERS) { __ pop(reg); } else { @@ -498,36 +510,40 @@ Register VirtualFrame::Peek() { void VirtualFrame::Dup() { - AssertIsNotSpilled(); - switch (top_of_stack_state_) { - case NO_TOS_REGISTERS: - __ ldr(r0, MemOperand(sp, 0)); - top_of_stack_state_ = R0_TOS; - break; - case R0_TOS: - __ mov(r1, r0); - // r0 and r1 contains the same value. Prefer a state with r0 holding TOS. - top_of_stack_state_ = R0_R1_TOS; - break; - case R1_TOS: - __ mov(r0, r1); - // r0 and r1 contains the same value. Prefer a state with r0 holding TOS. - top_of_stack_state_ = R0_R1_TOS; - break; - case R0_R1_TOS: - __ push(r1); - __ mov(r1, r0); - // r0 and r1 contains the same value. Prefer a state with r0 holding TOS. - top_of_stack_state_ = R0_R1_TOS; - break; - case R1_R0_TOS: - __ push(r0); - __ mov(r0, r1); - // r0 and r1 contains the same value. Prefer a state with r0 holding TOS. - top_of_stack_state_ = R0_R1_TOS; - break; - default: - UNREACHABLE(); + if (SpilledScope::is_spilled()) { + __ ldr(ip, MemOperand(sp, 0)); + __ push(ip); + } else { + switch (top_of_stack_state_) { + case NO_TOS_REGISTERS: + __ ldr(r0, MemOperand(sp, 0)); + top_of_stack_state_ = R0_TOS; + break; + case R0_TOS: + __ mov(r1, r0); + // r0 and r1 contains the same value. Prefer state with r0 holding TOS. 
+ top_of_stack_state_ = R0_R1_TOS; + break; + case R1_TOS: + __ mov(r0, r1); + // r0 and r1 contains the same value. Prefer state with r0 holding TOS. + top_of_stack_state_ = R0_R1_TOS; + break; + case R0_R1_TOS: + __ push(r1); + __ mov(r1, r0); + // r0 and r1 contains the same value. Prefer state with r0 holding TOS. + top_of_stack_state_ = R0_R1_TOS; + break; + case R1_R0_TOS: + __ push(r0); + __ mov(r0, r1); + // r0 and r1 contains the same value. Prefer state with r0 holding TOS. + top_of_stack_state_ = R0_R1_TOS; + break; + default: + UNREACHABLE(); + } } element_count_++; } @@ -576,7 +592,6 @@ Register VirtualFrame::PopToRegister(Register but_not_to_this_one) { ASSERT(but_not_to_this_one.is(r0) || but_not_to_this_one.is(r1) || but_not_to_this_one.is(no_reg)); - AssertIsNotSpilled(); element_count_--; if (top_of_stack_state_ == NO_TOS_REGISTERS) { if (but_not_to_this_one.is(r0)) { @@ -628,6 +643,39 @@ void VirtualFrame::EmitPush(Register reg) { } +void VirtualFrame::SetElementAt(Register reg, int this_far_down) { + if (this_far_down == 0) { + Pop(); + Register dest = GetTOSRegister(); + if (dest.is(reg)) { + // We already popped one item off the top of the stack. If the only + // free register is the one we were asked to push then we have been + // asked to push a register that was already in use, which cannot + // happen. It therefore folows that there are two free TOS registers: + ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS); + dest = dest.is(r0) ? r1 : r0; + } + __ mov(dest, reg); + EmitPush(dest); + } else if (this_far_down == 1) { + int virtual_elements = kVirtualElements[top_of_stack_state_]; + if (virtual_elements < 2) { + __ str(reg, ElementAt(this_far_down)); + } else { + ASSERT(virtual_elements == 2); + ASSERT(!reg.is(r0)); + ASSERT(!reg.is(r1)); + Register dest = kBottomRegister[top_of_stack_state_]; + __ mov(dest, reg); + } + } else { + ASSERT(this_far_down >= 2); + ASSERT(kVirtualElements[top_of_stack_state_] <= 2); + __ str(reg, ElementAt(this_far_down)); + } +} + + Register VirtualFrame::GetTOSRegister() { if (SpilledScope::is_spilled()) return r0; @@ -710,3 +758,5 @@ void VirtualFrame::SpillAll() { #undef __ } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h index b255929e2b..9471d61e1b 100644 --- a/deps/v8/src/arm/virtual-frame-arm.h +++ b/deps/v8/src/arm/virtual-frame-arm.h @@ -29,11 +29,14 @@ #define V8_ARM_VIRTUAL_FRAME_ARM_H_ #include "register-allocator.h" -#include "scopes.h" namespace v8 { namespace internal { +// This dummy class is only used to create invalid virtual frames. +extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer; + + // ------------------------------------------------------------------------- // Virtual frames // @@ -82,26 +85,8 @@ class VirtualFrame : public ZoneObject { // is not spilled, ie. where register allocation occurs. Eventually // when RegisterAllocationScope is ubiquitous it can be removed // along with the (by then unused) SpilledScope class. 
- explicit RegisterAllocationScope(CodeGenerator* cgen) - : cgen_(cgen), - old_is_spilled_(SpilledScope::is_spilled_) { - SpilledScope::is_spilled_ = false; - if (old_is_spilled_) { - VirtualFrame* frame = cgen->frame(); - if (frame != NULL) { - frame->AssertIsSpilled(); - } - } - } - ~RegisterAllocationScope() { - SpilledScope::is_spilled_ = old_is_spilled_; - if (old_is_spilled_) { - VirtualFrame* frame = cgen_->frame(); - if (frame != NULL) { - frame->SpillAll(); - } - } - } + inline explicit RegisterAllocationScope(CodeGenerator* cgen); + inline ~RegisterAllocationScope(); private: CodeGenerator* cgen_; @@ -116,19 +101,20 @@ class VirtualFrame : public ZoneObject { // Construct an initial virtual frame on entry to a JS function. inline VirtualFrame(); + // Construct an invalid virtual frame, used by JumpTargets. + inline VirtualFrame(InvalidVirtualFrameInitializer* dummy); + // Construct a virtual frame as a clone of an existing one. explicit inline VirtualFrame(VirtualFrame* original); - CodeGenerator* cgen() { return CodeGeneratorScope::Current(); } - MacroAssembler* masm() { return cgen()->masm(); } + inline CodeGenerator* cgen(); + inline MacroAssembler* masm(); // The number of elements on the virtual frame. int element_count() { return element_count_; } // The height of the virtual expression stack. - int height() { - return element_count() - expression_base_index(); - } + inline int height(); bool is_used(int num) { switch (num) { @@ -160,10 +146,6 @@ class VirtualFrame : public ZoneObject { } } - bool is_used(Register reg) { - return is_used(RegisterAllocator::ToNumber(reg)); - } - // Add extra in-memory elements to the top of the frame to match an actual // frame (eg, the frame after an exception handler is pushed). No code is // emitted. @@ -247,16 +229,13 @@ class VirtualFrame : public ZoneObject { // An element of the expression stack as an assembly operand. MemOperand ElementAt(int index) { - AssertIsSpilled(); - return MemOperand(sp, index * kPointerSize); + int adjusted_index = index - kVirtualElements[top_of_stack_state_]; + ASSERT(adjusted_index >= 0); + return MemOperand(sp, adjusted_index * kPointerSize); } // A frame-allocated local as an assembly operand. - MemOperand LocalAt(int index) { - ASSERT(0 <= index); - ASSERT(index < local_count()); - return MemOperand(fp, kLocal0Offset - index * kPointerSize); - } + inline MemOperand LocalAt(int index); // Push the address of the receiver slot on the frame. void PushReceiverSlotAddress(); @@ -268,26 +247,17 @@ class VirtualFrame : public ZoneObject { MemOperand Context() { return MemOperand(fp, kContextOffset); } // A parameter as an assembly operand. - MemOperand ParameterAt(int index) { - // Index -1 corresponds to the receiver. - ASSERT(-1 <= index); // -1 is the receiver. - ASSERT(index <= parameter_count()); - return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize); - } + inline MemOperand ParameterAt(int index); // The receiver frame slot. - MemOperand Receiver() { return ParameterAt(-1); } + inline MemOperand Receiver(); // Push a try-catch or try-finally handler on top of the virtual frame. void PushTryHandler(HandlerType type); // Call stub given the number of arguments it expects on (and // removes from) the stack. 
- void CallStub(CodeStub* stub, int arg_count) { - if (arg_count != 0) Forget(arg_count); - ASSERT(cgen()->HasValidEntryRegisters()); - masm()->CallStub(stub); - } + inline void CallStub(CodeStub* stub, int arg_count); // Call JS function from top of the stack with arguments // taken from the stack. @@ -308,7 +278,8 @@ class VirtualFrame : public ZoneObject { InvokeJSFlags flag, int arg_count); - // Call load IC. Receiver is on the stack. Result is returned in r0. + // Call load IC. Receiver is on the stack and is consumed. Result is returned + // in r0. void CallLoadIC(Handle name, RelocInfo::Mode mode); // Call store IC. If the load is contextual, value is found on top of the @@ -320,8 +291,8 @@ class VirtualFrame : public ZoneObject { // Result is returned in r0. void CallKeyedLoadIC(); - // Call keyed store IC. Key and receiver are on the stack and the value is in - // r0. Result is returned in r0. + // Call keyed store IC. Value, key and receiver are on the stack. All three + // are consumed. Result is returned in r0. void CallKeyedStoreIC(); // Call into an IC stub given the number of arguments it removes @@ -386,6 +357,12 @@ class VirtualFrame : public ZoneObject { void EmitPush(MemOperand operand); void EmitPushRoot(Heap::RootListIndex index); + // Overwrite the nth thing on the stack. If the nth position is in a + // register then this turns into a mov, otherwise an str. Afterwards + // you can still use the register even if it is a register that can be + // used for TOS (r0 or r1). + void SetElementAt(Register reg, int this_far_down); + // Get a register which is free and which must be immediately used to // push on the top of the stack. Register GetTOSRegister(); @@ -449,13 +426,13 @@ class VirtualFrame : public ZoneObject { int stack_pointer() { return element_count_ - 1; } // The number of frame-allocated locals and parameters respectively. - int parameter_count() { return cgen()->scope()->num_parameters(); } - int local_count() { return cgen()->scope()->num_stack_slots(); } + inline int parameter_count(); + inline int local_count(); // The index of the element that is at the processor's frame pointer // (the fp register). The parameters, receiver, function, and context // are below the frame pointer. - int frame_pointer() { return parameter_count() + 3; } + inline int frame_pointer(); // The index of the first parameter. The receiver lies below the first // parameter. @@ -463,26 +440,22 @@ class VirtualFrame : public ZoneObject { // The index of the context slot in the frame. It is immediately // below the frame pointer. - int context_index() { return frame_pointer() - 1; } + inline int context_index(); // The index of the function slot in the frame. It is below the frame // pointer and context slot. - int function_index() { return frame_pointer() - 2; } + inline int function_index(); // The index of the first local. Between the frame pointer and the // locals lies the return address. - int local0_index() { return frame_pointer() + 2; } + inline int local0_index(); // The index of the base of the expression stack. - int expression_base_index() { return local0_index() + local_count(); } + inline int expression_base_index(); // Convert a frame index into a frame pointer relative offset into the // actual stack. - int fp_relative(int index) { - ASSERT(index < element_count()); - ASSERT(frame_pointer() < element_count()); // FP is on the frame. - return (frame_pointer() - index) * kPointerSize; - } + inline int fp_relative(int index); // Spill all elements in registers. 
Spill the top spilled_args elements // on the frame. Sync all other frame elements. @@ -494,10 +467,13 @@ class VirtualFrame : public ZoneObject { // onto the physical stack and made free. void EnsureOneFreeTOSRegister(); + // Emit instructions to get the top of stack state from where we are to where + // we want to be. + void MergeTOSTo(TopOfStack expected_state); + inline bool Equals(VirtualFrame* other); friend class JumpTarget; - friend class DeferredCode; }; diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index 87f363b0c0..871ca86eda 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -424,8 +424,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) { return "no reloc"; case RelocInfo::EMBEDDED_OBJECT: return "embedded object"; - case RelocInfo::EMBEDDED_STRING: - return "embedded string"; case RelocInfo::CONSTRUCT_CALL: return "code target (js construct call)"; case RelocInfo::CODE_TARGET_CONTEXT: @@ -508,7 +506,6 @@ void RelocInfo::Verify() { ASSERT(code->address() == HeapObject::cast(found)->address()); break; } - case RelocInfo::EMBEDDED_STRING: case RUNTIME_ENTRY: case JS_RETURN: case COMMENT: diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index 5d03c1f854..f2a6c8be39 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -121,7 +121,6 @@ class RelocInfo BASE_EMBEDDED { DEBUG_BREAK, CODE_TARGET, // code target which is not any of the above. EMBEDDED_OBJECT, - EMBEDDED_STRING, // Everything after runtime_entry (inclusive) is not GC'ed. RUNTIME_ENTRY, @@ -137,7 +136,7 @@ class RelocInfo BASE_EMBEDDED { NUMBER_OF_MODES, // must be no greater than 14 - see RelocInfoWriter NONE, // never recorded LAST_CODE_ENUM = CODE_TARGET, - LAST_GCED_ENUM = EMBEDDED_STRING + LAST_GCED_ENUM = EMBEDDED_OBJECT }; @@ -185,6 +184,11 @@ class RelocInfo BASE_EMBEDDED { // Apply a relocation by delta bytes INLINE(void apply(intptr_t delta)); + // Is the pointer this relocation info refers to coded like a plain pointer + // or is it strange in some way (eg relative or patched into a series of + // instructions). + bool IsCodedSpecially(); + // Read/modify the code target in the branch/call instruction // this relocation applies to; // can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY diff --git a/deps/v8/src/ast-inl.h b/deps/v8/src/ast-inl.h new file mode 100644 index 0000000000..2b5d7c472b --- /dev/null +++ b/deps/v8/src/ast-inl.h @@ -0,0 +1,79 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#include "ast.h" + +namespace v8 { +namespace internal { + +BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type) + : labels_(labels), type_(type) { + ASSERT(labels == NULL || labels->length() > 0); +} + + +SwitchStatement::SwitchStatement(ZoneStringList* labels) + : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), + tag_(NULL), cases_(NULL) { +} + + +IterationStatement::IterationStatement(ZoneStringList* labels) + : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { +} + + +Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block) + : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY), + statements_(capacity), + is_initializer_block_(is_initializer_block) { +} + + +ForStatement::ForStatement(ZoneStringList* labels) + : IterationStatement(labels), + init_(NULL), + cond_(NULL), + next_(NULL), + may_have_function_literal_(true), + loop_variable_(NULL), + peel_this_loop_(false) { +} + + +ForInStatement::ForInStatement(ZoneStringList* labels) + : IterationStatement(labels), each_(NULL), enumerable_(NULL) { +} + + +DoWhileStatement::DoWhileStatement(ZoneStringList* labels) + : IterationStatement(labels), cond_(NULL), condition_position_(-1) { +} + +} } // namespace v8::internal diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 75b2945d9c..92df990063 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -32,6 +32,8 @@ #include "parser.h" #include "scopes.h" #include "string-stream.h" +#include "ast-inl.h" +#include "jump-target-inl.h" namespace v8 { namespace internal { @@ -786,6 +788,13 @@ Block::Block(Block* other, ZoneList* statements) } +WhileStatement::WhileStatement(ZoneStringList* labels) + : IterationStatement(labels), + cond_(NULL), + may_have_function_literal_(true) { +} + + ExpressionStatement::ExpressionStatement(ExpressionStatement* other, Expression* expression) : Statement(other), expression_(expression) {} @@ -809,6 +818,11 @@ IterationStatement::IterationStatement(IterationStatement* other, : BreakableStatement(other), body_(body) {} +CaseClause::CaseClause(Expression* label, ZoneList* statements) + : label_(label), statements_(statements) { +} + + ForStatement::ForStatement(ForStatement* other, Statement* init, Expression* cond, diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index dfc08ee071..a3a97341dd 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -351,10 +351,7 @@ class BreakableStatement: public Statement { bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; } protected: - BreakableStatement(ZoneStringList* labels, Type type) - : labels_(labels), type_(type) { - ASSERT(labels == NULL || labels->length() > 0); - } + inline BreakableStatement(ZoneStringList* labels, Type type); explicit BreakableStatement(BreakableStatement* other); @@ -367,10 +364,7 @@ class BreakableStatement: public Statement { class Block: public BreakableStatement { public: - Block(ZoneStringList* labels, int capacity, bool is_initializer_block) 
- : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY), - statements_(capacity), - is_initializer_block_(is_initializer_block) { } + inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block); // Construct a clone initialized from the original block and // a deep copy of all statements of the original block. @@ -437,8 +431,7 @@ class IterationStatement: public BreakableStatement { BreakTarget* continue_target() { return &continue_target_; } protected: - explicit IterationStatement(ZoneStringList* labels) - : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { } + explicit inline IterationStatement(ZoneStringList* labels); // Construct a clone initialized from original and // a deep copy of the original body. @@ -456,9 +449,7 @@ class IterationStatement: public BreakableStatement { class DoWhileStatement: public IterationStatement { public: - explicit DoWhileStatement(ZoneStringList* labels) - : IterationStatement(labels), cond_(NULL), condition_position_(-1) { - } + explicit inline DoWhileStatement(ZoneStringList* labels); void Initialize(Expression* cond, Statement* body) { IterationStatement::Initialize(body); @@ -482,11 +473,7 @@ class DoWhileStatement: public IterationStatement { class WhileStatement: public IterationStatement { public: - explicit WhileStatement(ZoneStringList* labels) - : IterationStatement(labels), - cond_(NULL), - may_have_function_literal_(true) { - } + explicit WhileStatement(ZoneStringList* labels); void Initialize(Expression* cond, Statement* body) { IterationStatement::Initialize(body); @@ -511,14 +498,7 @@ class WhileStatement: public IterationStatement { class ForStatement: public IterationStatement { public: - explicit ForStatement(ZoneStringList* labels) - : IterationStatement(labels), - init_(NULL), - cond_(NULL), - next_(NULL), - may_have_function_literal_(true), - loop_variable_(NULL), - peel_this_loop_(false) {} + explicit inline ForStatement(ZoneStringList* labels); // Construct a for-statement initialized from another for-statement // and deep copies of all parts of the original statement. 
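Every constructor hunk in ast.h above applies the same mechanical change. A minimal sketch of the pattern, reassembled from the ast.h and ast-inl.h pieces shown in this patch (the real classes carry more members than shown here):

  // ast.h: the constructor shrinks to an inline declaration.
  class ForInStatement: public IterationStatement {
   public:
    explicit inline ForInStatement(ZoneStringList* labels);
    // ...
  };

  // ast-inl.h: the body moves here as an out-of-line inline definition.
  ForInStatement::ForInStatement(ZoneStringList* labels)
      : IterationStatement(labels), each_(NULL), enumerable_(NULL) {
  }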
@@ -574,8 +554,7 @@ class ForStatement: public IterationStatement { class ForInStatement: public IterationStatement { public: - explicit ForInStatement(ZoneStringList* labels) - : IterationStatement(labels), each_(NULL), enumerable_(NULL) { } + explicit inline ForInStatement(ZoneStringList* labels); void Initialize(Expression* each, Expression* enumerable, Statement* body) { IterationStatement::Initialize(body); @@ -691,8 +670,7 @@ class WithExitStatement: public Statement { class CaseClause: public ZoneObject { public: - CaseClause(Expression* label, ZoneList* statements) - : label_(label), statements_(statements) { } + CaseClause(Expression* label, ZoneList* statements); bool is_default() const { return label_ == NULL; } Expression* label() const { @@ -711,9 +689,7 @@ class CaseClause: public ZoneObject { class SwitchStatement: public BreakableStatement { public: - explicit SwitchStatement(ZoneStringList* labels) - : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), - tag_(NULL), cases_(NULL) { } + explicit inline SwitchStatement(ZoneStringList* labels); void Initialize(Expression* tag, ZoneList* cases) { tag_ = tag; diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index df1e98a66b..087413118f 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -1753,8 +1753,8 @@ Genesis::Genesis(Handle global_object, CreateNewGlobals(global_template, global_object, &inner_global); HookUpGlobalProxy(inner_global, global_proxy); InitializeGlobal(inner_global, empty_function); - if (!InstallNatives()) return; InstallJSFunctionResultCaches(); + if (!InstallNatives()) return; MakeFunctionInstancePrototypeWritable(); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 4971275792..9a0fbd2704 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -330,22 +330,19 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) { } -static bool ArrayPrototypeHasNoElements() { +static bool ArrayPrototypeHasNoElements(Context* global_context, + JSObject* array_proto) { // This method depends on non writability of Object and Array prototype // fields. 
- Context* global_context = Top::context()->global_context(); - // Array.prototype - JSObject* proto = - JSObject::cast(global_context->array_function()->prototype()); - if (proto->elements() != Heap::empty_fixed_array()) return false; + if (array_proto->elements() != Heap::empty_fixed_array()) return false; // Hidden prototype - proto = JSObject::cast(proto->GetPrototype()); - ASSERT(proto->elements() == Heap::empty_fixed_array()); + array_proto = JSObject::cast(array_proto->GetPrototype()); + ASSERT(array_proto->elements() == Heap::empty_fixed_array()); // Object.prototype - proto = JSObject::cast(proto->GetPrototype()); - if (proto != global_context->initial_object_prototype()) return false; - if (proto->elements() != Heap::empty_fixed_array()) return false; - ASSERT(proto->GetPrototype()->IsNull()); + array_proto = JSObject::cast(array_proto->GetPrototype()); + if (array_proto != global_context->initial_object_prototype()) return false; + if (array_proto->elements() != Heap::empty_fixed_array()) return false; + ASSERT(array_proto->GetPrototype()->IsNull()); return true; } @@ -368,6 +365,18 @@ static bool IsJSArrayWithFastElements(Object* receiver, } +static bool IsFastElementMovingAllowed(Object* receiver, + FixedArray** elements) { + if (!IsJSArrayWithFastElements(receiver, elements)) return false; + + Context* global_context = Top::context()->global_context(); + JSObject* array_proto = + JSObject::cast(global_context->array_function()->prototype()); + if (JSArray::cast(receiver)->GetPrototype() != array_proto) return false; + return ArrayPrototypeHasNoElements(global_context, array_proto); +} + + static Object* CallJsBuiltin(const char* name, BuiltinArguments args) { HandleScope handleScope; @@ -465,11 +474,7 @@ BUILTIN(ArrayPop) { return top; } - // Remember to check the prototype chain. 
- JSFunction* array_function = - Top::context()->global_context()->array_function(); - JSObject* prototype = JSObject::cast(array_function->prototype()); - top = prototype->GetElement(len - 1); + top = array->GetPrototype()->GetElement(len - 1); return top; } @@ -478,8 +483,7 @@ BUILTIN(ArrayPop) { BUILTIN(ArrayShift) { Object* receiver = *args.receiver(); FixedArray* elms = NULL; - if (!IsJSArrayWithFastElements(receiver, &elms) - || !ArrayPrototypeHasNoElements()) { + if (!IsFastElementMovingAllowed(receiver, &elms)) { return CallJsBuiltin("ArrayShift", args); } JSArray* array = JSArray::cast(receiver); @@ -515,8 +519,7 @@ BUILTIN(ArrayShift) { BUILTIN(ArrayUnshift) { Object* receiver = *args.receiver(); FixedArray* elms = NULL; - if (!IsJSArrayWithFastElements(receiver, &elms) - || !ArrayPrototypeHasNoElements()) { + if (!IsFastElementMovingAllowed(receiver, &elms)) { return CallJsBuiltin("ArrayUnshift", args); } JSArray* array = JSArray::cast(receiver); @@ -565,8 +568,7 @@ BUILTIN(ArrayUnshift) { BUILTIN(ArraySlice) { Object* receiver = *args.receiver(); FixedArray* elms = NULL; - if (!IsJSArrayWithFastElements(receiver, &elms) - || !ArrayPrototypeHasNoElements()) { + if (!IsFastElementMovingAllowed(receiver, &elms)) { return CallJsBuiltin("ArraySlice", args); } JSArray* array = JSArray::cast(receiver); @@ -635,8 +637,7 @@ BUILTIN(ArraySlice) { BUILTIN(ArraySplice) { Object* receiver = *args.receiver(); FixedArray* elms = NULL; - if (!IsJSArrayWithFastElements(receiver, &elms) - || !ArrayPrototypeHasNoElements()) { + if (!IsFastElementMovingAllowed(receiver, &elms)) { return CallJsBuiltin("ArraySplice", args); } JSArray* array = JSArray::cast(receiver); @@ -788,7 +789,10 @@ BUILTIN(ArraySplice) { BUILTIN(ArrayConcat) { - if (!ArrayPrototypeHasNoElements()) { + Context* global_context = Top::context()->global_context(); + JSObject* array_proto = + JSObject::cast(global_context->array_function()->prototype()); + if (!ArrayPrototypeHasNoElements(global_context, array_proto)) { return CallJsBuiltin("ArrayConcat", args); } @@ -798,7 +802,8 @@ BUILTIN(ArrayConcat) { int result_len = 0; for (int i = 0; i < n_arguments; i++) { Object* arg = args[i]; - if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()) { + if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements() + || JSArray::cast(arg)->GetPrototype() != array_proto) { return CallJsBuiltin("ArrayConcat", args); } diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h index a5bb31f141..358c6fccd3 100644 --- a/deps/v8/src/codegen.h +++ b/deps/v8/src/codegen.h @@ -28,7 +28,6 @@ #ifndef V8_CODEGEN_H_ #define V8_CODEGEN_H_ -#include "ast.h" #include "code-stubs.h" #include "runtime.h" #include "type-info.h" @@ -115,7 +114,7 @@ namespace internal { F(CharFromCode, 1, 1) \ F(ObjectEquals, 2, 1) \ F(Log, 3, 1) \ - F(RandomHeapNumber, 0, 1) \ + F(RandomHeapNumber, 0, 1) \ F(IsObject, 1, 1) \ F(IsFunction, 1, 1) \ F(IsUndetectableObject, 1, 1) \ diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index 901f2186a0..27d4835dcf 100755 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -120,7 +120,21 @@ static Handle MakeCode(Handle context, CompilationInfo* info) { ? 
info->scope()->is_global_scope() : (shared->is_toplevel() || shared->try_full_codegen()); - if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) { + bool force_full_compiler = false; +#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64) + // On ia32 the full compiler can compile all code whereas the other platforms + // the constructs supported is checked by the associated syntax checker. When + // --always-full-compiler is used on ia32 the syntax checker is still in + // effect, but there is a special flag --force-full-compiler to ignore the + // syntax checker completely and use the full compiler for all code. Also + // when debugging on ia32 the full compiler will be used for all code. + force_full_compiler = + Debugger::IsDebuggerActive() || FLAG_force_full_compiler; +#endif + + if (force_full_compiler) { + return FullCodeGenerator::MakeCode(info); + } else if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) { FullCodeGenSyntaxChecker checker; checker.Check(function); if (checker.has_supported_syntax()) { diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h index e454a9a93c..9ef6841dc6 100644 --- a/deps/v8/src/cpu-profiler-inl.h +++ b/deps/v8/src/cpu-profiler-inl.h @@ -54,7 +54,7 @@ void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) { void CodeAliasEventRecord::UpdateCodeMap(CodeMap* code_map) { - code_map->AddAlias(alias, start); + code_map->AddAlias(start, entry, code_start); } diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc index ed3f6925e0..52a891f925 100644 --- a/deps/v8/src/cpu-profiler.cc +++ b/deps/v8/src/cpu-profiler.cc @@ -141,13 +141,15 @@ void ProfilerEventsProcessor::CodeDeleteEvent(Address from) { void ProfilerEventsProcessor::FunctionCreateEvent(Address alias, - Address start) { + Address start, + int security_token_id) { CodeEventsContainer evt_rec; CodeAliasEventRecord* rec = &evt_rec.CodeAliasEventRecord_; rec->type = CodeEventRecord::CODE_ALIAS; rec->order = ++enqueue_order_; - rec->alias = alias; - rec->start = start; + rec->start = alias; + rec->entry = generator_->NewCodeEntry(security_token_id); + rec->code_start = start; events_buffer_.Enqueue(evt_rec); } @@ -257,26 +259,30 @@ CpuProfile* CpuProfiler::StopProfiling(const char* title) { } -CpuProfile* CpuProfiler::StopProfiling(String* title) { - return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL; +CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) { + return is_profiling() ? + singleton_->StopCollectingProfile(security_token, title) : NULL; } int CpuProfiler::GetProfilesCount() { ASSERT(singleton_ != NULL); - return singleton_->profiles_->profiles()->length(); + // The count of profiles doesn't depend on a security token. 
+ return singleton_->profiles_->Profiles(CodeEntry::kNoSecurityToken)->length(); } -CpuProfile* CpuProfiler::GetProfile(int index) { +CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) { ASSERT(singleton_ != NULL); - return singleton_->profiles_->profiles()->at(index); + const int token = singleton_->token_enumerator_->GetTokenId(security_token); + return singleton_->profiles_->Profiles(token)->at(index); } -CpuProfile* CpuProfiler::FindProfile(unsigned uid) { +CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) { ASSERT(singleton_ != NULL); - return singleton_->profiles_->GetProfile(uid); + const int token = singleton_->token_enumerator_->GetTokenId(security_token); + return singleton_->profiles_->GetProfile(token, uid); } @@ -348,8 +354,15 @@ void CpuProfiler::CodeDeleteEvent(Address from) { void CpuProfiler::FunctionCreateEvent(JSFunction* function) { + int security_token_id = CodeEntry::kNoSecurityToken; + if (function->unchecked_context()->IsContext()) { + security_token_id = singleton_->token_enumerator_->GetTokenId( + function->context()->global_context()->security_token()); + } singleton_->processor_->FunctionCreateEvent( - function->address(), function->code()->address()); + function->address(), + function->code()->address(), + security_token_id); } @@ -388,12 +401,14 @@ void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) { CpuProfiler::CpuProfiler() : profiles_(new CpuProfilesCollection()), next_profile_uid_(1), + token_enumerator_(new TokenEnumerator()), generator_(NULL), processor_(NULL) { } CpuProfiler::~CpuProfiler() { + delete token_enumerator_; delete profiles_; } @@ -438,7 +453,9 @@ void CpuProfiler::StartProcessorIfNotStarted() { CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) { const double actual_sampling_rate = generator_->actual_sampling_rate(); StopProcessorIfLastProfile(); - CpuProfile* result = profiles_->StopProfiling(title, actual_sampling_rate); + CpuProfile* result = profiles_->StopProfiling(CodeEntry::kNoSecurityToken, + title, + actual_sampling_rate); if (result != NULL) { result->Print(); } @@ -446,10 +463,12 @@ CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) { } -CpuProfile* CpuProfiler::StopCollectingProfile(String* title) { +CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token, + String* title) { const double actual_sampling_rate = generator_->actual_sampling_rate(); StopProcessorIfLastProfile(); - return profiles_->StopProfiling(title, actual_sampling_rate); + int token = token_enumerator_->GetTokenId(security_token); + return profiles_->StopProfiling(token, title, actual_sampling_rate); } diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h index 35d8d5e060..a51133d424 100644 --- a/deps/v8/src/cpu-profiler.h +++ b/deps/v8/src/cpu-profiler.h @@ -41,7 +41,7 @@ class CodeMap; class CpuProfile; class CpuProfilesCollection; class ProfileGenerator; - +class TokenEnumerator; #define CODE_EVENTS_TYPE_LIST(V) \ V(CODE_CREATION, CodeCreateEventRecord) \ @@ -94,8 +94,9 @@ class CodeDeleteEventRecord : public CodeEventRecord { class CodeAliasEventRecord : public CodeEventRecord { public: - Address alias; Address start; + CodeEntry* entry; + Address code_start; INLINE(void UpdateCodeMap(CodeMap* code_map)); }; @@ -151,7 +152,7 @@ class ProfilerEventsProcessor : public Thread { Address start, unsigned size); void CodeMoveEvent(Address from, Address to); void CodeDeleteEvent(Address from); - void FunctionCreateEvent(Address alias, 
Address start); + void FunctionCreateEvent(Address alias, Address start, int security_token_id); void FunctionMoveEvent(Address from, Address to); void FunctionDeleteEvent(Address from); void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag, @@ -212,10 +213,10 @@ class CpuProfiler { static void StartProfiling(const char* title); static void StartProfiling(String* title); static CpuProfile* StopProfiling(const char* title); - static CpuProfile* StopProfiling(String* title); + static CpuProfile* StopProfiling(Object* security_token, String* title); static int GetProfilesCount(); - static CpuProfile* GetProfile(int index); - static CpuProfile* FindProfile(unsigned uid); + static CpuProfile* GetProfile(Object* security_token, int index); + static CpuProfile* FindProfile(Object* security_token, unsigned uid); // Invoked from stack sampler (thread or signal handler.) static TickSample* TickSampleEvent(); @@ -252,11 +253,12 @@ class CpuProfiler { void StartCollectingProfile(String* title); void StartProcessorIfNotStarted(); CpuProfile* StopCollectingProfile(const char* title); - CpuProfile* StopCollectingProfile(String* title); + CpuProfile* StopCollectingProfile(Object* security_token, String* title); void StopProcessorIfLastProfile(); CpuProfilesCollection* profiles_; unsigned next_profile_uid_; + TokenEnumerator* token_enumerator_; ProfileGenerator* generator_; ProfilerEventsProcessor* processor_; int saved_logging_nesting_; diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js index b9ff09cee9..5c3da13a67 100644 --- a/deps/v8/src/d8.js +++ b/deps/v8/src/d8.js @@ -341,6 +341,11 @@ function DebugRequest(cmd_line) { this.request_ = this.breakCommandToJSONRequest_(args); break; + case 'breakpoints': + case 'bb': + this.request_ = this.breakpointsCommandToJSONRequest_(args); + break; + case 'clear': this.request_ = this.clearCommandToJSONRequest_(args); break; @@ -770,6 +775,15 @@ DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) { }; +DebugRequest.prototype.breakpointsCommandToJSONRequest_ = function(args) { + if (args && args.length > 0) { + throw new Error('Unexpected arguments.'); + } + var request = this.createRequest('listbreakpoints'); + return request.toJSONProtocol(); +}; + + // Create a JSON request for the clear command. DebugRequest.prototype.clearCommandToJSONRequest_ = function(args) { // Build a evaluate request from the text command. 
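Rough usage sketch of the new d8 debugger command added above; the request is whatever createRequest('listbreakpoints').toJSONProtocol() produces, and the printed fields follow the response formatter in the next hunk, so the transcript below is illustrative rather than verbatim output:

  (at the d8 debugger prompt)
  breakpoints
  breakpoints: (1)
   id=1 type=scriptName script_name=test.js line=42 column=0 hit_count=0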
@@ -947,6 +961,39 @@ function DebugResponseDetails(response) { result += body.breakpoint; details.text = result; break; + + case 'listbreakpoints': + result = 'breakpoints: (' + body.breakpoints.length + ')'; + for (var i = 0; i < body.breakpoints.length; i++) { + var breakpoint = body.breakpoints[i]; + result += '\n id=' + breakpoint.number; + result += ' type=' + breakpoint.type; + if (breakpoint.script_id) { + result += ' script_id=' + breakpoint.script_id; + } + if (breakpoint.script_name) { + result += ' script_name=' + breakpoint.script_name; + } + result += ' line=' + breakpoint.line; + if (breakpoint.column != null) { + result += ' column=' + breakpoint.column; + } + if (breakpoint.groupId) { + result += ' groupId=' + breakpoint.groupId; + } + if (breakpoint.ignoreCount) { + result += ' ignoreCount=' + breakpoint.ignoreCount; + } + if (breakpoint.active === false) { + result += ' inactive'; + } + if (breakpoint.condition) { + result += ' condition=' + breakpoint.condition; + } + result += ' hit_count=' + breakpoint.hit_count; + } + details.text = result; + break; case 'backtrace': if (body.totalFrames == 0) { @@ -1136,8 +1183,8 @@ function DebugResponseDetails(response) { default: details.text = - 'Response for unknown command \'' + response.command + '\'' + - ' (' + json_response + ')'; + 'Response for unknown command \'' + response.command() + '\'' + + ' (' + response.raw_json() + ')'; } } catch (e) { details.text = 'Error: "' + e + '" formatting response'; @@ -1153,6 +1200,7 @@ function DebugResponseDetails(response) { * @constructor */ function ProtocolPackage(json) { + this.raw_json_ = json; this.packet_ = JSON.parse(json); this.refs_ = []; if (this.packet_.refs) { @@ -1243,6 +1291,11 @@ ProtocolPackage.prototype.lookup = function(handle) { } +ProtocolPackage.prototype.raw_json = function() { + return this.raw_json_; +} + + function ProtocolValue(value, packet) { this.value_ = value; this.packet_ = packet; diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js index b9e19d68ed..e780cb86a5 100644 --- a/deps/v8/src/date.js +++ b/deps/v8/src/date.js @@ -238,7 +238,15 @@ function LocalTime(time) { return time + DaylightSavingsOffset(time) + local_time_offset; } + +var ltcache = { + key: null, + val: null +}; + function LocalTimeNoCheck(time) { + var ltc = ltcache; + if (%_ObjectEquals(time, ltc.key)) return ltc.val; if (time < -MAX_TIME_MS || time > MAX_TIME_MS) { return $NaN; } @@ -252,7 +260,8 @@ function LocalTimeNoCheck(time) { } else { var dst_offset = DaylightSavingsOffset(time); } - return time + local_time_offset + dst_offset; + ltc.key = time; + return (ltc.val = time + local_time_offset + dst_offset); } diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js index e94cee41d2..77fa1ddd65 100644 --- a/deps/v8/src/debug-debugger.js +++ b/deps/v8/src/debug-debugger.js @@ -1266,6 +1266,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request) this.clearBreakPointRequest_(request, response); } else if (request.command == 'clearbreakpointgroup') { this.clearBreakPointGroupRequest_(request, response); + } else if (request.command == 'listbreakpoints') { + this.listBreakpointsRequest_(request, response); } else if (request.command == 'backtrace') { this.backtraceRequest_(request, response); } else if (request.command == 'frame') { @@ -1581,6 +1583,35 @@ DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, resp response.body = { breakpoint: break_point } } +DebugCommandProcessor.prototype.listBreakpointsRequest_ 
= function(request, response) { + var array = []; + for (var i = 0; i < script_break_points.length; i++) { + var break_point = script_break_points[i]; + + var description = { + number: break_point.number(), + line: break_point.line(), + column: break_point.column(), + groupId: break_point.groupId(), + hit_count: break_point.hit_count(), + active: break_point.active(), + condition: break_point.condition(), + ignoreCount: break_point.ignoreCount() + } + + if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) { + description.type = 'scriptId'; + description.script_id = break_point.script_id(); + } else { + description.type = 'scriptName'; + description.script_name = break_point.script_name(); + } + array.push(description); + } + + response.body = { breakpoints: array } +} + DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response) { // Get the number of frames. diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index bf1f893b7d..8cb95efd06 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -72,6 +72,17 @@ static Handle ComputeCallDebugPrepareStepIn(int argc) { } +static v8::Handle GetDebugEventContext() { + Handle context = Debug::debugger_entry()->GetContext(); + // Top::context() may have been NULL when "script collected" event occured. + if (*context == NULL) { + return v8::Local(); + } + Handle global_context(context->global_context()); + return v8::Utils::ToLocal(global_context); +} + + BreakLocationIterator::BreakLocationIterator(Handle debug_info, BreakLocatorType type) { debug_info_ = debug_info; @@ -2112,12 +2123,14 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event, if (event_listener_->IsProxy()) { // C debug event listener. Handle callback_obj(Handle::cast(event_listener_)); - v8::Debug::EventCallback callback = - FUNCTION_CAST(callback_obj->proxy()); - callback(event, - v8::Utils::ToLocal(Handle::cast(exec_state)), - v8::Utils::ToLocal(event_data), - v8::Utils::ToLocal(Handle::cast(event_listener_data_))); + v8::Debug::EventCallback2 callback = + FUNCTION_CAST(callback_obj->proxy()); + EventDetailsImpl event_details( + event, + Handle::cast(exec_state), + event_data, + event_listener_data_); + callback(event_details); } else { // JavaScript debug event listener. ASSERT(event_listener_->IsJSFunction()); @@ -2643,14 +2656,10 @@ v8::Handle MessageImpl::GetJSON() const { v8::Handle MessageImpl::GetEventContext() const { - Handle context = Debug::debugger_entry()->GetContext(); - // Top::context() may have been NULL when "script collected" event occured. - if (*context == NULL) { - ASSERT(event_ == v8::ScriptCollected); - return v8::Local(); - } - Handle global_context(context->global_context()); - return v8::Utils::ToLocal(global_context); + v8::Handle context = GetDebugEventContext(); + // Top::context() may be NULL when "script collected" event occures. 
+ ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected); + return GetDebugEventContext(); } @@ -2659,6 +2668,41 @@ v8::Debug::ClientData* MessageImpl::GetClientData() const { } +EventDetailsImpl::EventDetailsImpl(DebugEvent event, + Handle exec_state, + Handle event_data, + Handle callback_data) + : event_(event), + exec_state_(exec_state), + event_data_(event_data), + callback_data_(callback_data) {} + + +DebugEvent EventDetailsImpl::GetEvent() const { + return event_; +} + + +v8::Handle EventDetailsImpl::GetExecutionState() const { + return v8::Utils::ToLocal(exec_state_); +} + + +v8::Handle EventDetailsImpl::GetEventData() const { + return v8::Utils::ToLocal(event_data_); +} + + +v8::Handle EventDetailsImpl::GetEventContext() const { + return GetDebugEventContext(); +} + + +v8::Handle EventDetailsImpl::GetCallbackData() const { + return v8::Utils::ToLocal(callback_data_); +} + + CommandMessage::CommandMessage() : text_(Vector::empty()), client_data_(NULL) { } diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h index e7ac94e319..e2eecb8bfb 100644 --- a/deps/v8/src/debug.h +++ b/deps/v8/src/debug.h @@ -524,6 +524,27 @@ class MessageImpl: public v8::Debug::Message { }; +// Details of the debug event delivered to the debug event listener. +class EventDetailsImpl : public v8::Debug::EventDetails { + public: + EventDetailsImpl(DebugEvent event, + Handle exec_state, + Handle event_data, + Handle callback_data); + virtual DebugEvent GetEvent() const; + virtual v8::Handle GetExecutionState() const; + virtual v8::Handle GetEventData() const; + virtual v8::Handle GetEventContext() const; + virtual v8::Handle GetCallbackData() const; + private: + DebugEvent event_; // Debug event causing the break. + Handle exec_state_; // Current execution state. + Handle event_data_; // Data associated with the event. + Handle callback_data_; // User data passed with the callback when + // it was registered. +}; + + // Message send by user to v8 debugger or debugger output message. // In addition to command text it may contain a pointer to some user data // which are expected to be passed along with the command reponse to message @@ -693,8 +714,9 @@ class Debugger { static void set_loading_debugger(bool v) { is_loading_debugger_ = v; } static bool is_loading_debugger() { return Debugger::is_loading_debugger_; } - private: static bool IsDebuggerActive(); + + private: static void ListenersChanged(); static Mutex* debugger_access_; // Mutex guarding debugger variables. 
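The debug.cc and debug.h hunks above switch C++ event listeners from four positional arguments to a single EventDetails object. A hedged sketch of an embedder-side listener under the new EventCallback2 scheme; the accessor names mirror the EventDetailsImpl declaration above, while the public v8::Debug::EventDetails interface itself is declared in include/v8-debug.h, which is part of this patch but not excerpted in this section:

  static void DebugEventListener(const v8::Debug::EventDetails& details) {
    if (details.GetEvent() == v8::Break) {
      v8::Handle<v8::Object> exec_state = details.GetExecutionState();
      v8::Handle<v8::Object> event_data = details.GetEventData();
      v8::Handle<v8::Context> context = details.GetEventContext();
      // ... inspect the paused frame via exec_state / event_data ...
    }
  }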
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 490a2c5408..c360508c17 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -100,10 +100,10 @@ private: DEFINE_bool(debug_code, false, "generate extra code (comments, assertions) for debugging") DEFINE_bool(emit_branch_hints, false, "emit branch hints") -DEFINE_bool(push_pop_elimination, true, - "eliminate redundant push/pops in assembly code") -DEFINE_bool(print_push_pop_elimination, false, - "print elimination of redundant push/pops in assembly code") +DEFINE_bool(peephole_optimization, true, + "perform peephole optimizations in assembly code") +DEFINE_bool(print_peephole_optimization, false, + "print peephole optimizations in assembly code") DEFINE_bool(enable_sse2, true, "enable use of SSE2 instructions if available") DEFINE_bool(enable_sse3, true, @@ -149,6 +149,10 @@ DEFINE_bool(full_compiler, true, "enable dedicated backend for run-once code") DEFINE_bool(fast_compiler, false, "enable speculative optimizing backend") DEFINE_bool(always_full_compiler, false, "try to use the dedicated run-once backend for all code") +#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64) +DEFINE_bool(force_full_compiler, false, + "force use of the dedicated run-once backend for all code") +#endif DEFINE_bool(always_fast_compiler, false, "try to use the speculative optimizing backend for all code") DEFINE_bool(trace_bailout, false, @@ -182,6 +186,11 @@ DEFINE_bool(gc_global, false, "always perform global GCs") DEFINE_int(gc_interval, -1, "garbage collect after allocations") DEFINE_bool(trace_gc, false, "print one trace line following each garbage collection") +DEFINE_bool(trace_gc_nvp, false, + "print one detailed trace line in name=value format " + "after each garbage collection") +DEFINE_bool(print_cumulative_gc_stat, false, + "print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false, "print more details following each garbage collection") DEFINE_bool(collect_maps, true, diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index 699a1e97d5..2ccbca87ef 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -760,11 +760,6 @@ void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) { } -void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { - UNREACHABLE(); -} - - void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) { Comment cmnt(masm_, "[ DoWhileStatement"); SetStatementPosition(stmt); @@ -810,6 +805,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) { Visit(stmt->body()); __ bind(loop_statement.continue_target()); + // Check stack before looping. 
__ StackLimitCheck(&stack_limit_hit); __ bind(&stack_check_success); @@ -872,11 +868,6 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) { } -void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { - UNREACHABLE(); -} - - void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) { Comment cmnt(masm_, "[ TryCatchStatement"); SetStatementPosition(stmt); @@ -995,12 +986,6 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) { } -void FullCodeGenerator::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* expr) { - UNREACHABLE(); -} - - void FullCodeGenerator::VisitConditional(Conditional* expr) { Comment cmnt(masm_, "[ Conditional"); Label true_case, false_case, done; @@ -1034,6 +1019,24 @@ void FullCodeGenerator::VisitLiteral(Literal* expr) { } +void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { + Comment cmnt(masm_, "[ FunctionLiteral"); + + // Build the function boilerplate and instantiate it. + Handle function_info = + Compiler::BuildFunctionInfo(expr, script(), this); + if (HasStackOverflow()) return; + EmitNewClosure(function_info); +} + + +void FullCodeGenerator::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* expr) { + Comment cmnt(masm_, "[ SharedFunctionInfoLiteral"); + EmitNewClosure(expr->shared_function_info()); +} + + void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) { // Call runtime routine to allocate the catch extension object and // assign the exception value to the catch variable. diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h index 96d0f3e7e6..c7d0093712 100644 --- a/deps/v8/src/full-codegen.h +++ b/deps/v8/src/full-codegen.h @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -31,6 +31,7 @@ #include "v8.h" #include "ast.h" +#include "compiler.h" namespace v8 { namespace internal { @@ -229,8 +230,6 @@ class FullCodeGenerator: public AstVisitor { return stack_depth + kForInStackElementCount; } private: - // TODO(lrn): Check that this value is correct when implementing - // for-in. static const int kForInStackElementCount = 5; DISALLOW_COPY_AND_ASSIGN(ForIn); }; @@ -258,12 +257,22 @@ class FullCodeGenerator: public AstVisitor { // context. void DropAndApply(int count, Expression::Context context, Register reg); + // Set up branch labels for a test expression. + void PrepareTest(Label* materialize_true, + Label* materialize_false, + Label** if_true, + Label** if_false); + // Emit code to convert pure control flow to a pair of labels into the // result expected according to an expression context. void Apply(Expression::Context context, Label* materialize_true, Label* materialize_false); + // Emit code to convert constant control flow (true or false) into + // the result expected according to an expression context. + void Apply(Expression::Context context, bool flag); + // Helper function to convert a pure value into a test context. The value // is expected on the stack or the accumulator, depending on the platform. // See the platform-specific implementation for details. 
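PrepareTest and the two Apply overloads declared above are the hooks the full code generator uses to turn pure control flow back into whatever the surrounding expression context expects. A rough usage sketch, assuming callers follow the declared signatures and that context_ holds the current Expression::Context (the actual emitters live in the platform-specific *-codegen files):

  Label materialize_true, materialize_false;
  Label* if_true = NULL;
  Label* if_false = NULL;
  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
  // ... emit a comparison that branches to if_true, otherwise to if_false ...
  Apply(context_, if_true, if_false);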
@@ -348,6 +357,12 @@ class FullCodeGenerator: public AstVisitor { void VisitDeclarations(ZoneList* declarations); void DeclareGlobals(Handle pairs); + // Platform-specific code for a variable, constant, or function + // declaration. Functions have an initial value. + void EmitDeclaration(Variable* variable, + Variable::Mode mode, + FunctionLiteral* function); + // Platform-specific return sequence void EmitReturnSequence(int position); @@ -355,9 +370,48 @@ class FullCodeGenerator: public AstVisitor { void EmitCallWithStub(Call* expr); void EmitCallWithIC(Call* expr, Handle name, RelocInfo::Mode mode); + + // Platform-specific code for inline runtime calls. + void EmitInlineRuntimeCall(CallRuntime* expr); + void EmitIsSmi(ZoneList* arguments); + void EmitIsNonNegativeSmi(ZoneList* arguments); + void EmitIsObject(ZoneList* arguments); + void EmitIsUndetectableObject(ZoneList* arguments); + void EmitIsFunction(ZoneList* arguments); + void EmitIsArray(ZoneList* arguments); + void EmitIsRegExp(ZoneList* arguments); + void EmitIsConstructCall(ZoneList* arguments); + void EmitObjectEquals(ZoneList* arguments); + void EmitArguments(ZoneList* arguments); + void EmitArgumentsLength(ZoneList* arguments); + void EmitClassOf(ZoneList* arguments); + void EmitValueOf(ZoneList* arguments); + void EmitSetValueOf(ZoneList* arguments); + void EmitNumberToString(ZoneList* arguments); + void EmitCharFromCode(ZoneList* arguments); + void EmitFastCharCodeAt(ZoneList* arguments); + void EmitStringCompare(ZoneList* arguments); + void EmitStringAdd(ZoneList* arguments); + void EmitLog(ZoneList* arguments); + void EmitRandomHeapNumber(ZoneList* arguments); + void EmitSubString(ZoneList* arguments); + void EmitRegExpExec(ZoneList* arguments); + void EmitMathPow(ZoneList* arguments); + void EmitMathSin(ZoneList* arguments); + void EmitMathCos(ZoneList* arguments); + void EmitMathSqrt(ZoneList* arguments); + void EmitCallFunction(ZoneList* arguments); + void EmitRegExpConstructResult(ZoneList* arguments); + void EmitSwapElements(ZoneList* arguments); + void EmitGetFromCache(ZoneList* arguments); + // Platform-specific code for loading variables. void EmitVariableLoad(Variable* expr, Expression::Context context); + // Platform-specific support for allocating a new closure based on + // the given function info. + void EmitNewClosure(Handle info); + // Platform-specific support for compiling assignments. // Load a value from a named property. @@ -372,9 +426,15 @@ class FullCodeGenerator: public AstVisitor { // of the stack and the right one in the accumulator. void EmitBinaryOp(Token::Value op, Expression::Context context); + // Assign to the given expression as if via '='. The right-hand-side value + // is expected in the accumulator. + void EmitAssignment(Expression* expr); + // Complete a variable assignment. The right-hand-side value is expected // in the accumulator. - void EmitVariableAssignment(Variable* var, Expression::Context context); + void EmitVariableAssignment(Variable* var, + Token::Value op, + Expression::Context context); // Complete a named property assignment. The receiver is expected on top // of the stack and the right-hand-side value in the accumulator. @@ -385,6 +445,14 @@ class FullCodeGenerator: public AstVisitor { // accumulator. void EmitKeyedPropertyAssignment(Assignment* expr); + // Helper for compare operations. Expects the null-value in a register. 
+ void EmitNullCompare(bool strict, + Register obj, + Register null_const, + Label* if_true, + Label* if_false, + Register scratch); + void SetFunctionPosition(FunctionLiteral* fun); void SetReturnPosition(FunctionLiteral* fun); void SetStatementPosition(Statement* stmt); diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h index 981ea16d72..292d8d8040 100644 --- a/deps/v8/src/globals.h +++ b/deps/v8/src/globals.h @@ -59,6 +59,24 @@ namespace internal { #error Host architecture was not detected as supported by v8 #endif +// Target architecture detection. This may be set externally. If not, detect +// in the same way as the host architecture, that is, target the native +// environment as presented by the compiler. +#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_IA32) && \ + !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS) +#if defined(_M_X64) || defined(__x86_64__) +#define V8_TARGET_ARCH_X64 1 +#elif defined(_M_IX86) || defined(__i386__) +#define V8_TARGET_ARCH_IA32 1 +#elif defined(__ARMEL__) +#define V8_TARGET_ARCH_ARM 1 +#elif defined(_MIPS_ARCH_MIPS32R2) +#define V8_TARGET_ARCH_MIPS 1 +#else +#error Target architecture was not detected as supported by v8 +#endif +#endif + // Check for supported combinations of host and target architectures. #if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32) #error Target architecture ia32 is only supported on ia32 host diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index 0a276ca995..d554a3ba68 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -115,8 +115,11 @@ int Heap::external_allocation_limit_ = 0; Heap::HeapState Heap::gc_state_ = NOT_IN_GC; int Heap::mc_count_ = 0; +int Heap::ms_count_ = 0; int Heap::gc_count_ = 0; +GCTracer* Heap::tracer_ = NULL; + int Heap::unflattened_strings_length_ = 0; int Heap::always_allocate_scope_depth_ = 0; @@ -130,6 +133,11 @@ int Heap::allocation_timeout_ = 0; bool Heap::disallow_allocation_failure_ = false; #endif // DEBUG +int GCTracer::alive_after_last_gc_ = 0; +double GCTracer::last_gc_end_timestamp_ = 0.0; +int GCTracer::max_gc_pause_ = 0; +int GCTracer::max_alive_after_gc_ = 0; +int GCTracer::min_in_mutator_ = kMaxInt; int Heap::Capacity() { if (!HasBeenSetup()) return 0; @@ -570,7 +578,7 @@ void Heap::PerformGarbageCollection(AllocationSpace space, VerifySymbolTable(); if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { ASSERT(!allocation_allowed_); - GCTracer::ExternalScope scope(tracer); + GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); global_gc_prologue_callback_(); } @@ -596,14 +604,16 @@ void Heap::PerformGarbageCollection(AllocationSpace space, old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2); old_gen_exhausted_ = false; } else { + tracer_ = tracer; Scavenge(); + tracer_ = NULL; } Counters::objs_since_last_young.Set(0); if (collector == MARK_COMPACTOR) { DisableAssertNoAllocation allow_allocation; - GCTracer::ExternalScope scope(tracer); + GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); GlobalHandles::PostGarbageCollectionProcessing(); } @@ -627,7 +637,7 @@ void Heap::PerformGarbageCollection(AllocationSpace space, if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) { ASSERT(!allocation_allowed_); - GCTracer::ExternalScope scope(tracer); + GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); global_gc_epilogue_callback_(); } VerifySymbolTable(); @@ -636,7 +646,11 @@ void Heap::PerformGarbageCollection(AllocationSpace space, void Heap::MarkCompact(GCTracer* tracer) { gc_state_ 
= MARK_COMPACT; - mc_count_++; + if (MarkCompactCollector::IsCompacting()) { + mc_count_++; + } else { + ms_count_++; + } tracer->set_full_gc_count(mc_count_); LOG(ResourceEvent("markcompact", "begin")); @@ -1179,6 +1193,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { node->set_size(object_size); *p = target; + tracer()->increment_promoted_objects_size(object_size); return; } } else { @@ -1214,6 +1229,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { (*p)->Iterate(&v); #endif } + tracer()->increment_promoted_objects_size(object_size); return; } } @@ -2064,7 +2080,7 @@ Object* Heap::AllocateSubString(String* buffer, } // Make an attempt to flatten the buffer to reduce access time. - buffer->TryFlatten(); + buffer = buffer->TryFlattenGetString(); Object* result = buffer->IsAsciiRepresentation() ? AllocateRawAsciiString(length, pretenure ) @@ -3760,6 +3776,17 @@ void Heap::SetStackLimits() { void Heap::TearDown() { + if (FLAG_print_cumulative_gc_stat) { + PrintF("\n\n"); + PrintF("gc_count=%d ", gc_count_); + PrintF("mark_sweep_count=%d ", ms_count_); + PrintF("mark_compact_count=%d ", mc_count_); + PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause()); + PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator()); + PrintF("max_alive_after_gc=%d ", GCTracer::get_max_alive_after_gc()); + PrintF("\n\n"); + } + GlobalHandles::TearDown(); ExternalStringTable::TearDown(); @@ -4235,33 +4262,114 @@ void Heap::TracePathToGlobal() { #endif +static int CountTotalHolesSize() { + int holes_size = 0; + OldSpaces spaces; + for (OldSpace* space = spaces.next(); + space != NULL; + space = spaces.next()) { + holes_size += space->Waste() + space->AvailableFree(); + } + return holes_size; +} + + GCTracer::GCTracer() : start_time_(0.0), - start_size_(0.0), - external_time_(0.0), + start_size_(0), gc_count_(0), full_gc_count_(0), is_compacting_(false), - marked_count_(0) { + marked_count_(0), + allocated_since_last_gc_(0), + spent_in_mutator_(0), + promoted_objects_size_(0) { // These two fields reflect the state of the previous full collection. // Set them before they are changed by the collector. previous_has_compacted_ = MarkCompactCollector::HasCompacted(); previous_marked_count_ = MarkCompactCollector::previous_marked_count(); - if (!FLAG_trace_gc) return; + if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return; start_time_ = OS::TimeCurrentMillis(); - start_size_ = SizeOfHeapObjects(); + start_size_ = Heap::SizeOfObjects(); + + for (int i = 0; i < Scope::kNumberOfScopes; i++) { + scopes_[i] = 0; + } + + in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(); + + allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_; + + if (last_gc_end_timestamp_ > 0) { + spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0); + } } GCTracer::~GCTracer() { - if (!FLAG_trace_gc) return; // Printf ONE line iff flag is set. 
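The constructor changes above make the tracer remember when the previous collection ended, so the gap before the next one can be attributed to the mutator, and running maxima/minima can be reported at TearDown when FLAG_print_cumulative_gc_stat is set. A rough standalone sketch of that bookkeeping, with a hypothetical class and std::chrono in place of OS::TimeCurrentMillis:

    #include <algorithm>
    #include <chrono>
    #include <cstdio>
    #include <limits>

    // Sketch of the cumulative figures the patch adds to GCTracer: the longest
    // pause observed and the shortest stretch of mutator time between two
    // consecutive collections.
    class GcPauseStats {
     public:
      void OnGcStart() { start_ = Clock::now(); }

      void OnGcEnd() {
        Clock::time_point end = Clock::now();
        max_pause_ms_ = std::max(max_pause_ms_, Millis(start_, end));
        if (has_previous_end_) {
          // Mutator time is the gap between the end of the previous
          // collection and the start of this one.
          min_mutator_ms_ = std::min(min_mutator_ms_, Millis(previous_end_, start_));
        }
        previous_end_ = end;
        has_previous_end_ = true;
      }

      void Print() const {
        std::printf("max_gc_pause=%.1f min_in_mutator=%.1f\n",
                    max_pause_ms_, min_mutator_ms_);
      }

     private:
      using Clock = std::chrono::steady_clock;
      static double Millis(Clock::time_point a, Clock::time_point b) {
        return std::chrono::duration<double, std::milli>(b - a).count();
      }

      Clock::time_point start_;
      Clock::time_point previous_end_;
      bool has_previous_end_ = false;
      double max_pause_ms_ = 0.0;
      double min_mutator_ms_ = std::numeric_limits<double>::max();
    };

    int main() {
      GcPauseStats stats;
      stats.OnGcStart();
      stats.OnGcEnd();
      stats.OnGcStart();
      stats.OnGcEnd();
      stats.Print();
    }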
- int time = static_cast(OS::TimeCurrentMillis() - start_time_); - int external_time = static_cast(external_time_); - PrintF("%s %.1f -> %.1f MB, ", - CollectorString(), start_size_, SizeOfHeapObjects()); - if (external_time > 0) PrintF("%d / ", external_time); - PrintF("%d ms.\n", time); + if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return; + + bool first_gc = (last_gc_end_timestamp_ == 0); + + alive_after_last_gc_ = Heap::SizeOfObjects(); + last_gc_end_timestamp_ = OS::TimeCurrentMillis(); + + int time = static_cast(last_gc_end_timestamp_ - start_time_); + + // Update cumulative GC statistics if required. + if (FLAG_print_cumulative_gc_stat) { + max_gc_pause_ = Max(max_gc_pause_, time); + max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_); + if (!first_gc) { + min_in_mutator_ = Min(min_in_mutator_, + static_cast(spent_in_mutator_)); + } + } + + if (!FLAG_trace_gc_nvp) { + int external_time = static_cast(scopes_[Scope::EXTERNAL]); + + PrintF("%s %.1f -> %.1f MB, ", + CollectorString(), + static_cast(start_size_) / MB, + SizeOfHeapObjects()); + + if (external_time > 0) PrintF("%d / ", external_time); + PrintF("%d ms.\n", time); + } else { + PrintF("pause=%d ", time); + PrintF("mutator=%d ", + static_cast(spent_in_mutator_)); + + PrintF("gc="); + switch (collector_) { + case SCAVENGER: + PrintF("s"); + break; + case MARK_COMPACTOR: + PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms"); + break; + default: + UNREACHABLE(); + } + PrintF(" "); + + PrintF("external=%d ", static_cast(scopes_[Scope::EXTERNAL])); + PrintF("mark=%d ", static_cast(scopes_[Scope::MC_MARK])); + PrintF("sweep=%d ", static_cast(scopes_[Scope::MC_SWEEP])); + PrintF("compact=%d ", static_cast(scopes_[Scope::MC_COMPACT])); + + PrintF("total_size_before=%d ", start_size_); + PrintF("total_size_after=%d ", Heap::SizeOfObjects()); + PrintF("holes_size_before=%d ", in_free_list_or_wasted_before_gc_); + PrintF("holes_size_after=%d ", CountTotalHolesSize()); + + PrintF("allocated=%d ", allocated_since_last_gc_); + PrintF("promoted=%d ", promoted_objects_size_); + + PrintF("\n"); + } #if defined(ENABLE_LOGGING_AND_PROFILING) Heap::PrintShortHeapStatistics(); diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index b4af6d9c22..74e5a31b19 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -981,6 +981,8 @@ class Heap : public AllStatic { static void ClearJSFunctionResultCaches(); + static GCTracer* tracer() { return tracer_; } + private: static int reserved_semispace_size_; static int max_semispace_size_; @@ -1020,6 +1022,7 @@ class Heap : public AllStatic { static int PromotedExternalMemorySize(); static int mc_count_; // how many mark-compact collections happened + static int ms_count_; // how many mark-sweep collections happened static int gc_count_; // how many gc happened // Total length of the strings we failed to flatten since the last GC. @@ -1223,6 +1226,8 @@ class Heap : public AllStatic { SharedFunctionInfo* shared, Object* prototype); + static GCTracer* tracer_; + // Initializes the number to string cache based on the max semispace size. static Object* InitializeNumberStringCache(); @@ -1629,19 +1634,30 @@ class DisableAssertNoAllocation { class GCTracer BASE_EMBEDDED { public: - // Time spent while in the external scope counts towards the - // external time in the tracer and will be reported separately. 
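The single external_time_ counter gives way to a small RAII helper, GCTracer::Scope, defined in the heap.h hunk that follows: constructing one records a start time and its destructor charges the elapsed milliseconds to one of a fixed set of buckets (EXTERNAL, MC_MARK, MC_SWEEP, MC_COMPACT). The pattern in isolation, with hypothetical names and std::chrono standing in for the OS timer:

    #include <chrono>
    #include <cstdio>

    // Buckets loosely corresponding to the ScopeId values added in this patch.
    enum Phase { kExternal, kMark, kSweep, kCompact, kNumPhases };

    struct PhaseTimes {
      double ms[kNumPhases] = {};
    };

    // RAII helper in the spirit of GCTracer::Scope: whatever time passes
    // between construction and destruction is added to the chosen bucket.
    class ScopedPhaseTimer {
     public:
      ScopedPhaseTimer(PhaseTimes* times, Phase phase)
          : times_(times), phase_(phase), start_(Clock::now()) {}
      ~ScopedPhaseTimer() {
        times_->ms[phase_] +=
            std::chrono::duration<double, std::milli>(Clock::now() - start_).count();
      }

     private:
      using Clock = std::chrono::steady_clock;
      PhaseTimes* times_;
      Phase phase_;
      Clock::time_point start_;
    };

    int main() {
      PhaseTimes times;
      {
        ScopedPhaseTimer timer(&times, kMark);
        // ... marking work would run here ...
      }
      std::printf("mark=%.3f ms\n", times.ms[kMark]);
    }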
- class ExternalScope BASE_EMBEDDED { + class Scope BASE_EMBEDDED { public: - explicit ExternalScope(GCTracer* tracer) : tracer_(tracer) { + enum ScopeId { + EXTERNAL, + MC_MARK, + MC_SWEEP, + MC_COMPACT, + kNumberOfScopes + }; + + Scope(GCTracer* tracer, ScopeId scope) + : tracer_(tracer), + scope_(scope) { start_time_ = OS::TimeCurrentMillis(); } - ~ExternalScope() { - tracer_->external_time_ += OS::TimeCurrentMillis() - start_time_; + + ~Scope() { + ASSERT((0 <= scope_) && (scope_ < kNumberOfScopes)); + tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_; } private: GCTracer* tracer_; + ScopeId scope_; double start_time_; }; @@ -1667,6 +1683,19 @@ class GCTracer BASE_EMBEDDED { int marked_count() { return marked_count_; } + void increment_promoted_objects_size(int object_size) { + promoted_objects_size_ += object_size; + } + + // Returns maximum GC pause. + static int get_max_gc_pause() { return max_gc_pause_; } + + // Returns maximum size of objects alive after GC. + static int get_max_alive_after_gc() { return max_alive_after_gc_; } + + // Returns minimal interval between two subsequent collections. + static int get_min_in_mutator() { return min_in_mutator_; } + private: // Returns a string matching the collector. const char* CollectorString(); @@ -1677,12 +1706,9 @@ class GCTracer BASE_EMBEDDED { } double start_time_; // Timestamp set in the constructor. - double start_size_; // Size of objects in heap set in constructor. + int start_size_; // Size of objects in heap set in constructor. GarbageCollector collector_; // Type of collector. - // Keep track of the amount of time spent in external callbacks. - double external_time_; - // A count (including this one, eg, the first collection is 1) of the // number of garbage collections. int gc_count_; @@ -1706,6 +1732,38 @@ class GCTracer BASE_EMBEDDED { // The count from the end of the previous full GC. Will be zero if there // was no previous full GC. int previous_marked_count_; + + // Amounts of time spent in different scopes during GC. + double scopes_[Scope::kNumberOfScopes]; + + // Total amount of space either wasted or contained in one of free lists + // before the current GC. + int in_free_list_or_wasted_before_gc_; + + // Difference between space used in the heap at the beginning of the current + // collection and the end of the previous collection. + int allocated_since_last_gc_; + + // Amount of time spent in mutator that is time elapsed between end of the + // previous collection and the beginning of the current one. + double spent_in_mutator_; + + // Size of objects promoted during the current collection. + int promoted_objects_size_; + + // Maximum GC pause. + static int max_gc_pause_; + + // Maximum size of objects alive after GC. + static int max_alive_after_gc_; + + // Minimal interval between two subsequent collections. + static int min_in_mutator_; + + // Size of objects alive after last GC. 
+ static int alive_after_last_gc_; + + static double last_gc_end_timestamp_; }; diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h index 6dc584e62b..1d88220469 100644 --- a/deps/v8/src/ia32/assembler-ia32-inl.h +++ b/deps/v8/src/ia32/assembler-ia32-inl.h @@ -159,11 +159,6 @@ Immediate::Immediate(const ExternalReference& ext) { rmode_ = RelocInfo::EXTERNAL_REFERENCE; } -Immediate::Immediate(const char* s) { - x_ = reinterpret_cast(s); - rmode_ = RelocInfo::EMBEDDED_STRING; -} - Immediate::Immediate(Label* internal_offset) { x_ = reinterpret_cast(internal_offset); diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index 26e40b15bc..4690c67289 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -36,6 +36,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_IA32) + #include "disassembler.h" #include "macro-assembler.h" #include "serialize.h" @@ -160,6 +162,15 @@ const int RelocInfo::kApplyMask = 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE; +bool RelocInfo::IsCodedSpecially() { + // The deserializer needs to know whether a pointer is specially coded. Being + // specially coded on IA32 means that it is a relative address, as used by + // branch instructions. These are also the ones that need changing when a + // code object moves. + return (1 << rmode_) & kApplyMask; +} + + void RelocInfo::PatchCode(byte* instructions, int instruction_count) { // Patch the code at the current address with the supplied instructions. for (int i = 0; i < instruction_count; i++) { @@ -433,7 +444,7 @@ void Assembler::push(const Operand& src) { void Assembler::pop(Register dst) { ASSERT(reloc_info_writer.last_pc() != NULL); - if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) { + if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) { // (last_pc_ != NULL) is rolled into the above check. 
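Renaming FLAG_push_pop_elimination to FLAG_peephole_optimization reflects what the rewrites in the following hunks are: the assembler looks at the instruction it just emitted and, when a push is immediately followed by a pop, deletes or rewrites the pair instead of emitting both (same register: drop both; different register or operand: fold into a move). A toy standalone pass over symbolic instructions, purely for illustration and covering only the simplest case:

    #include <cstdio>
    #include <vector>

    // A symbolic instruction: PUSH or POP of a register number.
    struct Insn { enum Kind { kPush, kPop } kind; int reg; };

    // Toy peephole pass: an adjacent "push r" / "pop r" pair is a no-op and
    // is deleted. (The real assembler also rewrites push/pop of different
    // registers into a move and folds pushed immediates into the pop target.)
    std::vector<Insn> EliminatePushPop(const std::vector<Insn>& in) {
      std::vector<Insn> out;
      for (const Insn& insn : in) {
        if (insn.kind == Insn::kPop && !out.empty() &&
            out.back().kind == Insn::kPush && out.back().reg == insn.reg) {
          out.pop_back();  // drop the push; skip the pop
          continue;
        }
        out.push_back(insn);
      }
      return out;
    }

    int main() {
      std::vector<Insn> code = {{Insn::kPush, 1}, {Insn::kPop, 1}, {Insn::kPush, 2}};
      std::printf("%zu instructions left\n", EliminatePushPop(code).size());  // 1
    }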
// If a last_pc_ is set, we need to make sure that there has not been any // relocation information generated between the last instruction and this @@ -443,7 +454,7 @@ void Assembler::pop(Register dst) { int push_reg_code = instr & 0x7; if (push_reg_code == dst.code()) { pc_ = last_pc_; - if (FLAG_print_push_pop_elimination) { + if (FLAG_print_peephole_optimization) { PrintF("%d push/pop (same reg) eliminated\n", pc_offset()); } } else { @@ -452,7 +463,7 @@ void Assembler::pop(Register dst) { Register src = { push_reg_code }; EnsureSpace ensure_space(this); emit_operand(dst, Operand(src)); - if (FLAG_print_push_pop_elimination) { + if (FLAG_print_peephole_optimization) { PrintF("%d push/pop (reg->reg) eliminated\n", pc_offset()); } } @@ -466,7 +477,7 @@ void Assembler::pop(Register dst) { last_pc_[0] = 0x8b; last_pc_[1] = op1; last_pc_ = NULL; - if (FLAG_print_push_pop_elimination) { + if (FLAG_print_peephole_optimization) { PrintF("%d push/pop (op->reg) eliminated\n", pc_offset()); } return; @@ -483,7 +494,7 @@ void Assembler::pop(Register dst) { last_pc_[1] = 0xc4; last_pc_[2] = 0x04; last_pc_ = NULL; - if (FLAG_print_push_pop_elimination) { + if (FLAG_print_peephole_optimization) { PrintF("%d push/pop (mov-pop) eliminated\n", pc_offset()); } return; @@ -498,7 +509,7 @@ void Assembler::pop(Register dst) { // change to // 31c0 xor eax,eax last_pc_ = NULL; - if (FLAG_print_push_pop_elimination) { + if (FLAG_print_peephole_optimization) { PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset()); } return; @@ -521,7 +532,7 @@ void Assembler::pop(Register dst) { // b8XX000000 mov eax,0x000000XX } last_pc_ = NULL; - if (FLAG_print_push_pop_elimination) { + if (FLAG_print_peephole_optimization) { PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset()); } return; @@ -533,7 +544,7 @@ void Assembler::pop(Register dst) { last_pc_ = NULL; // change to // b8XXXXXXXX mov eax,0xXXXXXXXX - if (FLAG_print_push_pop_elimination) { + if (FLAG_print_peephole_optimization) { PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset()); } return; @@ -776,6 +787,13 @@ void Assembler::rep_stos() { } +void Assembler::stos() { + EnsureSpace ensure_space(this); + last_pc_ = pc_; + EMIT(0xAB); +} + + void Assembler::xchg(Register dst, Register src) { EnsureSpace ensure_space(this); last_pc_ = pc_; @@ -813,7 +831,7 @@ void Assembler::add(Register dst, const Operand& src) { void Assembler::add(const Operand& dst, const Immediate& x) { ASSERT(reloc_info_writer.last_pc() != NULL); - if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) { + if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) { byte instr = last_pc_[0]; if ((instr & 0xf8) == 0x50) { // Last instruction was a push. 
Check whether this is a pop without a @@ -822,7 +840,7 @@ void Assembler::add(const Operand& dst, const Immediate& x) { (x.x_ == kPointerSize) && (x.rmode_ == RelocInfo::NONE)) { pc_ = last_pc_; last_pc_ = NULL; - if (FLAG_print_push_pop_elimination) { + if (FLAG_print_peephole_optimization) { PrintF("%d push/pop(noreg) eliminated\n", pc_offset()); } return; @@ -2528,3 +2546,5 @@ void LogGeneratedCodeCoverage(const char* file_line) { #endif } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h index 6a7effd421..9ece74432b 100644 --- a/deps/v8/src/ia32/assembler-ia32.h +++ b/deps/v8/src/ia32/assembler-ia32.h @@ -194,7 +194,6 @@ inline Hint NegateHint(Hint hint) { class Immediate BASE_EMBEDDED { public: inline explicit Immediate(int x); - inline explicit Immediate(const char* s); inline explicit Immediate(const ExternalReference& ext); inline explicit Immediate(Handle handle); inline explicit Immediate(Smi* value); @@ -551,6 +550,7 @@ class Assembler : public Malloced { // Repetitive string instructions. void rep_movs(); void rep_stos(); + void stos(); // Exchange two registers void xchg(Register dst, Register src); diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc index 80e421bccd..608625817a 100644 --- a/deps/v8/src/ia32/builtins-ia32.cc +++ b/deps/v8/src/ia32/builtins-ia32.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_IA32) + #include "codegen-inl.h" namespace v8 { @@ -806,6 +808,7 @@ static void AllocateJSArray(MacroAssembler* masm, Label* gc_required) { ASSERT(scratch.is(edi)); // rep stos destination ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count + ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax // Load the initial map from the array function. __ mov(elements_array, @@ -863,15 +866,22 @@ static void AllocateJSArray(MacroAssembler* masm, if (fill_with_hole) { __ lea(edi, Operand(elements_array, FixedArray::kHeaderSize - kHeapObjectTag)); - - __ push(eax); __ mov(eax, Factory::the_hole_value()); - __ cld(); + // Do not use rep stos when filling less than kRepStosThreshold + // words. + const int kRepStosThreshold = 16; + Label loop, entry, done; + __ cmp(ecx, kRepStosThreshold); + __ j(below, &loop); // Note: ecx > 0. __ rep_stos(); - - // Restore saved registers. - __ pop(eax); + __ jmp(&done); + __ bind(&loop); + __ stos(); + __ bind(&entry); + __ cmp(edi, Operand(elements_array_end)); + __ j(below, &loop); + __ bind(&done); } } @@ -970,13 +980,14 @@ static void ArrayNativeCode(MacroAssembler* masm, AllocateJSArray(masm, edi, ecx, - eax, ebx, + eax, edx, edi, true, &prepare_generic_code_call); __ IncrementCounter(&Counters::array_function_native, 1); + __ mov(eax, ebx); __ pop(ebx); if (construct_call) { __ pop(edi); @@ -1067,7 +1078,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { // -- esp[0] : return address // -- esp[4] : last argument // ----------------------------------- - Label generic_array_code, one_or_more_arguments, two_or_more_arguments; + Label generic_array_code; // Get the Array function. 
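Filling the new array's backing store with the hole via rep stos only pays off above a size threshold; below it, the added stos loop avoids the string instruction's startup cost. The same trade-off expressed in ordinary standalone C++, purely as an illustration (std::fill_n stands in for the bulk path; the threshold value mirrors kRepStosThreshold):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Short fills use a simple store loop, longer fills switch to a bulk
    // routine with a fixed startup cost, as AllocateJSArray now does with
    // stos vs. rep stos.
    void FillWithHole(std::uintptr_t* slots, std::size_t count, std::uintptr_t hole) {
      const std::size_t kBulkThreshold = 16;
      if (count < kBulkThreshold) {
        for (std::size_t i = 0; i < count; ++i) slots[i] = hole;  // short loop
      } else {
        std::fill_n(slots, count, hole);                          // bulk path
      }
    }

    int main() {
      std::uintptr_t backing[32];
      FillWithHole(backing, 32, 0xdeadbeef);
      std::printf("%lx\n", static_cast<unsigned long>(backing[31]));
    }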
GenerateLoadArrayFunction(masm, edi); @@ -1247,3 +1258,5 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { #undef __ } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 63286a762a..226a374bc4 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_IA32) + #include "bootstrapper.h" #include "codegen-inl.h" #include "compiler.h" @@ -2979,6 +2981,7 @@ void CodeGenerator::CallWithArguments(ZoneList* args, int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { Load(args->at(i)); + frame_->SpillTop(); } // Record the position for debugging purposes. @@ -4227,8 +4230,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) { // Get the i'th entry of the array. __ mov(edx, frame_->ElementAt(2)); - __ mov(ebx, Operand(edx, eax, times_2, - FixedArray::kHeaderSize - kHeapObjectTag)); + __ mov(ebx, FixedArrayElementOperand(edx, eax)); // Get the expected map from the stack or a zero map in the // permanent slow case eax: current iteration count ebx: i'th entry @@ -4724,43 +4726,14 @@ Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { JumpTarget slow; JumpTarget done; - // Generate fast-case code for variables that might be shadowed by - // eval-introduced variables. Eval is used a lot without - // introducing variables. In those cases, we do not want to - // perform a runtime call for all variables in the scope - // containing the eval. - if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) { - result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow); - // If there was no control flow to slow, we can exit early. - if (!slow.is_linked()) return result; - done.Jump(&result); - - } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { - Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); - // Only generate the fast case for locals that rewrite to slots. - // This rules out argument loads because eval forces arguments - // access to be through the arguments object. - if (potential_slot != NULL) { - // Allocate a fresh register to use as a temp in - // ContextSlotOperandCheckExtensions and to hold the result - // value. - result = allocator()->Allocate(); - ASSERT(result.is_valid()); - __ mov(result.reg(), - ContextSlotOperandCheckExtensions(potential_slot, - result, - &slow)); - if (potential_slot->var()->mode() == Variable::CONST) { - __ cmp(result.reg(), Factory::the_hole_value()); - done.Branch(not_equal, &result); - __ mov(result.reg(), Factory::undefined_value()); - } - // There is always control flow to slow from - // ContextSlotOperandCheckExtensions so we have to jump around - // it. - done.Jump(&result); - } - } + // Generate fast case for loading from slots that correspond to + // local/global variables or arguments unless they are shadowed by + // eval-introduced bindings. + EmitDynamicLoadFromSlotFastCase(slot, + typeof_state, + &result, + &slow, + &done); slow.Bind(); // A runtime call is inevitable. We eagerly sync frame elements @@ -4929,6 +4902,68 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions( } +void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot, + TypeofState typeof_state, + Result* result, + JumpTarget* slow, + JumpTarget* done) { + // Generate fast-case code for variables that might be shadowed by + // eval-introduced variables. 
Eval is used a lot without + // introducing variables. In those cases, we do not want to + // perform a runtime call for all variables in the scope + // containing the eval. + if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) { + *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow); + done->Jump(result); + + } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { + Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); + Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite(); + if (potential_slot != NULL) { + // Generate fast case for locals that rewrite to slots. + // Allocate a fresh register to use as a temp in + // ContextSlotOperandCheckExtensions and to hold the result + // value. + *result = allocator()->Allocate(); + ASSERT(result->is_valid()); + __ mov(result->reg(), + ContextSlotOperandCheckExtensions(potential_slot, *result, slow)); + if (potential_slot->var()->mode() == Variable::CONST) { + __ cmp(result->reg(), Factory::the_hole_value()); + done->Branch(not_equal, result); + __ mov(result->reg(), Factory::undefined_value()); + } + done->Jump(result); + } else if (rewrite != NULL) { + // Generate fast case for calls of an argument function. + Property* property = rewrite->AsProperty(); + if (property != NULL) { + VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); + Literal* key_literal = property->key()->AsLiteral(); + if (obj_proxy != NULL && + key_literal != NULL && + obj_proxy->IsArguments() && + key_literal->handle()->IsSmi()) { + // Load arguments object if there are no eval-introduced + // variables. Then load the argument from the arguments + // object using keyed load. + Result arguments = allocator()->Allocate(); + ASSERT(arguments.is_valid()); + __ mov(arguments.reg(), + ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(), + arguments, + slow)); + frame_->Push(&arguments); + frame_->Push(key_literal->handle()); + *result = EmitKeyedLoad(); + done->Jump(result); + } + } + } + } +} + + void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { if (slot->type() == Slot::LOOKUP) { ASSERT(slot->var()->is_dynamic()); @@ -5698,6 +5733,7 @@ void CodeGenerator::VisitCall(Call* node) { int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { Load(args->at(i)); + frame_->SpillTop(); } // Prepare the stack for the call to ResolvePossiblyDirectEval. @@ -5747,6 +5783,7 @@ void CodeGenerator::VisitCall(Call* node) { int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { Load(args->at(i)); + frame_->SpillTop(); } // Push the name of the function onto the frame. @@ -5765,59 +5802,26 @@ void CodeGenerator::VisitCall(Call* node) { // ---------------------------------- // JavaScript examples: // - // with (obj) foo(1, 2, 3) // foo is in obj + // with (obj) foo(1, 2, 3) // foo may be in obj. // // function f() {}; // function g() { // eval(...); - // f(); // f could be in extension object + // f(); // f could be in extension object. // } // ---------------------------------- - JumpTarget slow; - JumpTarget done; - - // Generate fast-case code for variables that might be shadowed by - // eval-introduced variables. Eval is used a lot without - // introducing variables. In those cases, we do not want to - // perform a runtime call for all variables in the scope - // containing the eval. 
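EmitDynamicLoadFromSlotFastCase, shown in full above, factors the old inline fast paths into one helper that the variable load above and the call sequence further on can both reuse, and it adds a new case for arguments accessed through the arguments object. Its decision tree, summarized as a plain standalone model (descriptive strings only, none of this is V8 code):

    #include <cstdio>

    // Simplified picture of the helper's decision tree for a dynamically
    // scoped variable that might be shadowed by eval-introduced bindings.
    enum class SlotKind {
      kDynamicGlobal,         // global unless an extension object intervenes
      kDynamicLocalSlot,      // rewrites to a context or stack slot
      kDynamicLocalArgument,  // rewrites to arguments[i]
      kOther
    };

    const char* FastCaseFor(SlotKind kind) {
      switch (kind) {
        case SlotKind::kDynamicGlobal:
          return "check the context chain for extensions, then load the global";
        case SlotKind::kDynamicLocalSlot:
          return "check extensions, load the slot, unhole const values";
        case SlotKind::kDynamicLocalArgument:
          return "check extensions, load the arguments object, keyed load of the index";
        case SlotKind::kOther:
          return "no fast case; fall through to the runtime call";
      }
      return "";
    }

    int main() {
      std::printf("%s\n", FastCaseFor(SlotKind::kDynamicLocalArgument));
    }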
+ JumpTarget slow, done; Result function; - if (var->mode() == Variable::DYNAMIC_GLOBAL) { - function = LoadFromGlobalSlotCheckExtensions(var->slot(), - NOT_INSIDE_TYPEOF, - &slow); - frame_->Push(&function); - LoadGlobalReceiver(); - done.Jump(); - - } else if (var->mode() == Variable::DYNAMIC_LOCAL) { - Slot* potential_slot = var->local_if_not_shadowed()->slot(); - // Only generate the fast case for locals that rewrite to slots. - // This rules out argument loads because eval forces arguments - // access to be through the arguments object. - if (potential_slot != NULL) { - // Allocate a fresh register to use as a temp in - // ContextSlotOperandCheckExtensions and to hold the result - // value. - function = allocator()->Allocate(); - ASSERT(function.is_valid()); - __ mov(function.reg(), - ContextSlotOperandCheckExtensions(potential_slot, - function, - &slow)); - JumpTarget push_function_and_receiver; - if (potential_slot->var()->mode() == Variable::CONST) { - __ cmp(function.reg(), Factory::the_hole_value()); - push_function_and_receiver.Branch(not_equal, &function); - __ mov(function.reg(), Factory::undefined_value()); - } - push_function_and_receiver.Bind(&function); - frame_->Push(&function); - LoadGlobalReceiver(); - done.Jump(); - } - } + + // Generate fast case for loading functions from slots that + // correspond to local/global variables or arguments unless they + // are shadowed by eval-introduced bindings. + EmitDynamicLoadFromSlotFastCase(var->slot(), + NOT_INSIDE_TYPEOF, + &function, + &slow, + &done); slow.Bind(); // Enter the runtime system to load the function from the context. @@ -5839,7 +5843,18 @@ void CodeGenerator::VisitCall(Call* node) { ASSERT(!allocator()->is_used(edx)); frame_->EmitPush(edx); - done.Bind(); + // If fast case code has been generated, emit code to push the + // function and receiver and have the slow path jump around this + // code. + if (done.is_linked()) { + JumpTarget call; + call.Jump(); + done.Bind(&function); + frame_->Push(&function); + LoadGlobalReceiver(); + call.Bind(); + } + // Call the function. CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position()); @@ -5874,6 +5889,7 @@ void CodeGenerator::VisitCall(Call* node) { int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { Load(args->at(i)); + frame_->SpillTop(); } // Push the name of the function onto the frame. @@ -6149,11 +6165,11 @@ void CodeGenerator::GenerateIsObject(ZoneList* args) { __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset)); __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset)); __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE); - destination()->false_target()->Branch(less); + destination()->false_target()->Branch(below); __ cmp(map.reg(), LAST_JS_OBJECT_TYPE); obj.Unuse(); map.Unuse(); - destination()->Split(less_equal); + destination()->Split(below_equal); } @@ -6266,7 +6282,7 @@ void CodeGenerator::GenerateClassOf(ZoneList* args) { __ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset)); __ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset)); __ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE); - null.Branch(less); + null.Branch(below); // As long as JS_FUNCTION_TYPE is the last instance type and it is // right after LAST_JS_OBJECT_TYPE, we can avoid checking for @@ -6634,16 +6650,6 @@ class DeferredSearchCache: public DeferredCode { }; -// Return a position of the element at |index_as_smi| + |additional_offset| -// in FixedArray pointer to which is held in |array|. |index_as_smi| is Smi. 
-static Operand ArrayElement(Register array, - Register index_as_smi, - int additional_offset = 0) { - int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize; - return FieldOperand(array, index_as_smi, times_half_pointer_size, offset); -} - - void DeferredSearchCache::Generate() { Label first_loop, search_further, second_loop, cache_miss; @@ -6660,11 +6666,11 @@ void DeferredSearchCache::Generate() { __ cmp(Operand(dst_), Immediate(kEntriesIndexSmi)); __ j(less, &search_further); - __ cmp(key_, ArrayElement(cache_, dst_)); + __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_)); __ j(not_equal, &first_loop); __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_); - __ mov(dst_, ArrayElement(cache_, dst_, 1)); + __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1)); __ jmp(exit_label()); __ bind(&search_further); @@ -6678,11 +6684,11 @@ void DeferredSearchCache::Generate() { __ cmp(dst_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset)); __ j(less_equal, &cache_miss); - __ cmp(key_, ArrayElement(cache_, dst_)); + __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_)); __ j(not_equal, &second_loop); __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_); - __ mov(dst_, ArrayElement(cache_, dst_, 1)); + __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1)); __ jmp(exit_label()); __ bind(&cache_miss); @@ -6730,7 +6736,7 @@ void DeferredSearchCache::Generate() { __ pop(ebx); // restore the key __ mov(FieldOperand(ecx, JSFunctionResultCache::kFingerOffset), edx); // Store key. - __ mov(ArrayElement(ecx, edx), ebx); + __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx); __ RecordWrite(ecx, 0, ebx, edx); // Store value. @@ -6738,7 +6744,7 @@ void DeferredSearchCache::Generate() { __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset)); __ add(Operand(edx), Immediate(Smi::FromInt(1))); __ mov(ebx, eax); - __ mov(ArrayElement(ecx, edx), ebx); + __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx); __ RecordWrite(ecx, 0, ebx, edx); if (!dst_.is(eax)) { @@ -6785,11 +6791,11 @@ void CodeGenerator::GenerateGetFromCache(ZoneList* args) { // tmp.reg() now holds finger offset as a smi. ASSERT(kSmiTag == 0 && kSmiTagSize == 1); __ mov(tmp.reg(), FieldOperand(cache.reg(), - JSFunctionResultCache::kFingerOffset)); - __ cmp(key.reg(), ArrayElement(cache.reg(), tmp.reg())); + JSFunctionResultCache::kFingerOffset)); + __ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg())); deferred->Branch(not_equal); - __ mov(tmp.reg(), ArrayElement(cache.reg(), tmp.reg(), 1)); + __ mov(tmp.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg(), 1)); deferred->BindExit(); frame_->Push(&tmp); @@ -6866,7 +6872,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList* args) { // Check that object doesn't require security checks and // has no indexed interceptor. __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg()); - deferred->Branch(less); + deferred->Branch(below); __ movzx_b(tmp1.reg(), FieldOperand(tmp1.reg(), Map::kBitFieldOffset)); __ test(tmp1.reg(), Immediate(KeyedLoadIC::kSlowCaseBitFieldMask)); deferred->Branch(not_zero); @@ -6888,14 +6894,8 @@ void CodeGenerator::GenerateSwapElements(ZoneList* args) { deferred->Branch(not_zero); // Bring addresses into index1 and index2. 
- __ lea(index1.reg(), FieldOperand(tmp1.reg(), - index1.reg(), - times_half_pointer_size, // index1 is Smi - FixedArray::kHeaderSize)); - __ lea(index2.reg(), FieldOperand(tmp1.reg(), - index2.reg(), - times_half_pointer_size, // index2 is Smi - FixedArray::kHeaderSize)); + __ lea(index1.reg(), FixedArrayElementOperand(tmp1.reg(), index1.reg())); + __ lea(index2.reg(), FixedArrayElementOperand(tmp1.reg(), index2.reg())); // Swap elements. __ mov(object.reg(), Operand(index1.reg(), 0)); @@ -8192,11 +8192,11 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset)); __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset)); __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE); - destination()->false_target()->Branch(less); + destination()->false_target()->Branch(below); __ cmp(map.reg(), LAST_JS_OBJECT_TYPE); answer.Unuse(); map.Unuse(); - destination()->Split(less_equal); + destination()->Split(below_equal); } else { // Uncommon case: typeof testing against a string literal that is // never returned from the typeof operator. @@ -8768,11 +8768,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) { deferred->Branch(not_equal); // Store the value. - __ mov(Operand(tmp.reg(), - key.reg(), - times_2, - FixedArray::kHeaderSize - kHeapObjectTag), - result.reg()); + __ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg()); __ IncrementCounter(&Counters::keyed_store_inline, 1); deferred->BindExit(); @@ -9074,7 +9070,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { __ mov(ecx, Operand(esp, 3 * kPointerSize)); __ mov(eax, Operand(esp, 2 * kPointerSize)); ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0)); - __ mov(ecx, FieldOperand(ecx, eax, times_2, FixedArray::kHeaderSize)); + __ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax)); __ cmp(ecx, Factory::undefined_value()); __ j(equal, &slow_case); @@ -10296,6 +10292,11 @@ void IntegerConvert(MacroAssembler* masm, Label done, right_exponent, normal_exponent; Register scratch = ebx; Register scratch2 = edi; + if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) { + CpuFeatures::Scope scope(SSE2); + __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset)); + return; + } if (!type_info.IsInteger32() || !use_sse3) { // Get exponent word. __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset)); @@ -11601,7 +11602,7 @@ void CompareStub::Generate(MacroAssembler* masm) { ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); Label first_non_object; __ cmp(ecx, FIRST_JS_OBJECT_TYPE); - __ j(less, &first_non_object); + __ j(below, &first_non_object); // Return non-zero (eax is not zero) Label return_not_equal; @@ -11618,7 +11619,7 @@ void CompareStub::Generate(MacroAssembler* masm) { __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); __ cmp(ecx, FIRST_JS_OBJECT_TYPE); - __ j(greater_equal, &return_not_equal); + __ j(above_equal, &return_not_equal); // Check for oddballs: true, false, null, undefined. 
__ cmp(ecx, ODDBALL_TYPE); @@ -12266,9 +12267,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); // eax - object map __ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset)); // ecx - type __ cmp(ecx, FIRST_JS_OBJECT_TYPE); - __ j(less, &slow, not_taken); + __ j(below, &slow, not_taken); __ cmp(ecx, LAST_JS_OBJECT_TYPE); - __ j(greater, &slow, not_taken); + __ j(above, &slow, not_taken); // Get the prototype of the function. __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address @@ -12296,9 +12297,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset)); __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); __ cmp(ecx, FIRST_JS_OBJECT_TYPE); - __ j(less, &slow, not_taken); + __ j(below, &slow, not_taken); __ cmp(ecx, LAST_JS_OBJECT_TYPE); - __ j(greater, &slow, not_taken); + __ j(above, &slow, not_taken); // Register mapping: // eax is object map. @@ -13296,3 +13297,5 @@ void StringCompareStub::Generate(MacroAssembler* masm) { #undef __ } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h index 5967338da2..e00bec7131 100644 --- a/deps/v8/src/ia32/codegen-ia32.h +++ b/deps/v8/src/ia32/codegen-ia32.h @@ -28,7 +28,9 @@ #ifndef V8_IA32_CODEGEN_IA32_H_ #define V8_IA32_CODEGEN_IA32_H_ +#include "ast.h" #include "ic-inl.h" +#include "jump-target-heavy.h" namespace v8 { namespace internal { @@ -343,6 +345,15 @@ class CodeGenerator: public AstVisitor { // expected arguments. Otherwise return -1. static int InlineRuntimeCallArgumentsCount(Handle name); + // Return a position of the element at |index_as_smi| + |additional_offset| + // in FixedArray pointer to which is held in |array|. |index_as_smi| is Smi. + static Operand FixedArrayElementOperand(Register array, + Register index_as_smi, + int additional_offset = 0) { + int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize; + return FieldOperand(array, index_as_smi, times_half_pointer_size, offset); + } + private: // Construction/Destruction explicit CodeGenerator(MacroAssembler* masm); @@ -454,6 +465,16 @@ class CodeGenerator: public AstVisitor { TypeofState typeof_state, JumpTarget* slow); + // Support for loading from local/global variables and arguments + // whose location is known unless they are shadowed by + // eval-introduced bindings. Generates no code for unsupported slot + // types and therefore expects to fall through to the slow jump target. + void EmitDynamicLoadFromSlotFastCase(Slot* slot, + TypeofState typeof_state, + Result* result, + JumpTarget* slow, + JumpTarget* done); + // Store the value on top of the expression stack into a slot, leaving the // value in place. 
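The new FixedArrayElementOperand helper leans on ia32 smi tagging: a smi carries its integer value shifted left by one tag bit, so scaling the still-tagged index by half a pointer size (times_half_pointer_size) yields exactly index * kPointerSize without untagging first. A small standalone check of that identity (the header size constant is a stand-in for FixedArray::kHeaderSize, assumed to be 8 bytes as on a 32-bit target):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kSmiTagSize = 1;   // smi = value << 1 on ia32
      const int kPointerSize = 4;  // 32-bit pointers
      const int kHeaderSize = 8;   // stand-in for FixedArray::kHeaderSize

      for (int32_t index = 0; index < 1000; index++) {
        int32_t smi = index << kSmiTagSize;  // tagged index, as kept in a register
        // Scaling the tagged value by kPointerSize / 2 cancels the tag bit
        // against the missing factor of two.
        int32_t offset = smi * (kPointerSize / 2) + kHeaderSize;
        assert(offset == index * kPointerSize + kHeaderSize);
      }
      return 0;
    }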
void StoreToSlot(Slot* slot, InitState init_state); diff --git a/deps/v8/src/ia32/cpu-ia32.cc b/deps/v8/src/ia32/cpu-ia32.cc index 2107ad96f4..b15140f04c 100644 --- a/deps/v8/src/ia32/cpu-ia32.cc +++ b/deps/v8/src/ia32/cpu-ia32.cc @@ -33,6 +33,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_IA32) + #include "cpu.h" #include "macro-assembler.h" @@ -77,3 +79,5 @@ void CPU::DebugBreak() { } } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc index d142b11cf7..9780f3b09e 100644 --- a/deps/v8/src/ia32/debug-ia32.cc +++ b/deps/v8/src/ia32/debug-ia32.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_IA32) + #include "codegen-inl.h" #include "debug.h" @@ -261,3 +263,5 @@ const int Debug::kFrameDropperFrameSize = 5; #endif // ENABLE_DEBUGGER_SUPPORT } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc index 8d342e087c..58c22afcd3 100644 --- a/deps/v8/src/ia32/disasm-ia32.cc +++ b/deps/v8/src/ia32/disasm-ia32.cc @@ -30,6 +30,9 @@ #include #include "v8.h" + +#if defined(V8_TARGET_ARCH_IA32) + #include "disasm.h" namespace disasm { @@ -90,6 +93,7 @@ static ByteMnemonic zero_operands_instr[] = { {0x99, "cdq", UNSET_OP_ORDER}, {0x9B, "fwait", UNSET_OP_ORDER}, {0xFC, "cld", UNSET_OP_ORDER}, + {0xAB, "stos", UNSET_OP_ORDER}, {-1, "", UNSET_OP_ORDER} }; @@ -1438,3 +1442,5 @@ int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; } } // namespace disasm + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/fast-codegen-ia32.cc b/deps/v8/src/ia32/fast-codegen-ia32.cc index 61e2b5edfc..b749e594bc 100644 --- a/deps/v8/src/ia32/fast-codegen-ia32.cc +++ b/deps/v8/src/ia32/fast-codegen-ia32.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_IA32) + #include "codegen-inl.h" #include "fast-codegen.h" #include "data-flow.h" @@ -948,3 +950,5 @@ void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) { } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc index 5c900bedd7..212cfdeaa0 100644 --- a/deps/v8/src/ia32/frames-ia32.cc +++ b/deps/v8/src/ia32/frames-ia32.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_IA32) + #include "frames-inl.h" namespace v8 { @@ -109,3 +111,5 @@ Address InternalFrame::GetCallerStackPointer() const { } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index e9838ada77..368a8eeb0b 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_IA32) + #include "codegen-inl.h" #include "compiler.h" #include "debug.h" @@ -79,11 +81,17 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) { bool function_in_register = true; // Possibly allocate a local context. 
- if (scope()->num_heap_slots() > 0) { + int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots > 0) { Comment cmnt(masm_, "[ Allocate local context"); // Argument to NewContext is the function, which is still in edi. __ push(edi); - __ CallRuntime(Runtime::kNewContext, 1); + if (heap_slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(heap_slots); + __ CallStub(&stub); + } else { + __ CallRuntime(Runtime::kNewContext, 1); + } function_in_register = false; // Context is returned in both eax and esi. It replaces the context // passed to us. It's saved in the stack and kept live in esi. @@ -140,7 +148,18 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) { } { Comment cmnt(masm_, "[ Declarations"); - VisitDeclarations(scope()->declarations()); + // For named function expressions, declare the function name as a + // constant. + if (scope()->is_function_scope() && scope()->function() != NULL) { + EmitDeclaration(scope()->function(), Variable::CONST, NULL); + } + // Visit all the explicit declarations unless there is an illegal + // redeclaration. + if (scope()->HasIllegalRedeclaration()) { + scope()->VisitIllegalRedeclaration(this); + } else { + VisitDeclarations(scope()->declarations()); + } } { Comment cmnt(masm_, "[ Stack check"); @@ -425,6 +444,39 @@ void FullCodeGenerator::DropAndApply(int count, } +void FullCodeGenerator::PrepareTest(Label* materialize_true, + Label* materialize_false, + Label** if_true, + Label** if_false) { + switch (context_) { + case Expression::kUninitialized: + UNREACHABLE(); + break; + case Expression::kEffect: + // In an effect context, the true and the false case branch to the + // same label. + *if_true = *if_false = materialize_true; + break; + case Expression::kValue: + *if_true = materialize_true; + *if_false = materialize_false; + break; + case Expression::kTest: + *if_true = true_label_; + *if_false = false_label_; + break; + case Expression::kValueTest: + *if_true = materialize_true; + *if_false = false_label_; + break; + case Expression::kTestValue: + *if_true = true_label_; + *if_false = materialize_false; + break; + } +} + + void FullCodeGenerator::Apply(Expression::Context context, Label* materialize_true, Label* materialize_false) { @@ -490,6 +542,61 @@ void FullCodeGenerator::Apply(Expression::Context context, } +// Convert constant control flow (true or false) to the result expected for +// a given expression context. +void FullCodeGenerator::Apply(Expression::Context context, bool flag) { + switch (context) { + case Expression::kUninitialized: + UNREACHABLE(); + break; + case Expression::kEffect: + break; + case Expression::kValue: { + Handle value = + flag ? Factory::true_value() : Factory::false_value(); + switch (location_) { + case kAccumulator: + __ mov(result_register(), value); + break; + case kStack: + __ push(Immediate(value)); + break; + } + break; + } + case Expression::kTest: + __ jmp(flag ? true_label_ : false_label_); + break; + case Expression::kTestValue: + switch (location_) { + case kAccumulator: + // If value is false it's needed. + if (!flag) __ mov(result_register(), Factory::false_value()); + break; + case kStack: + // If value is false it's needed. + if (!flag) __ push(Immediate(Factory::false_value())); + break; + } + __ jmp(flag ? true_label_ : false_label_); + break; + case Expression::kValueTest: + switch (location_) { + case kAccumulator: + // If value is true it's needed. 
+ if (flag) __ mov(result_register(), Factory::true_value()); + break; + case kStack: + // If value is true it's needed. + if (flag) __ push(Immediate(Factory::true_value())); + break; + } + __ jmp(flag ? true_label_ : false_label_); + break; + } +} + + void FullCodeGenerator::DoTest(Expression::Context context) { // The value to test is in the accumulator. If the value might be needed // on the stack (value/test and test/value contexts with a stack location @@ -665,22 +772,22 @@ void FullCodeGenerator::Move(Slot* dst, } -void FullCodeGenerator::VisitDeclaration(Declaration* decl) { +void FullCodeGenerator::EmitDeclaration(Variable* variable, + Variable::Mode mode, + FunctionLiteral* function) { Comment cmnt(masm_, "[ Declaration"); - Variable* var = decl->proxy()->var(); - ASSERT(var != NULL); // Must have been resolved. - Slot* slot = var->slot(); - Property* prop = var->AsProperty(); - + ASSERT(variable != NULL); // Must have been resolved. + Slot* slot = variable->slot(); + Property* prop = variable->AsProperty(); if (slot != NULL) { switch (slot->type()) { case Slot::PARAMETER: case Slot::LOCAL: - if (decl->mode() == Variable::CONST) { + if (mode == Variable::CONST) { __ mov(Operand(ebp, SlotOffset(slot)), Immediate(Factory::the_hole_value())); - } else if (decl->fun() != NULL) { - VisitForValue(decl->fun(), kAccumulator); + } else if (function != NULL) { + VisitForValue(function, kAccumulator); __ mov(Operand(ebp, SlotOffset(slot)), result_register()); } break; @@ -690,7 +797,7 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) { // this specific context. // The variable in the decl always resides in the current context. - ASSERT_EQ(0, scope()->ContextChainLength(var->scope())); + ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); if (FLAG_debug_code) { // Check if we have the correct context pointer. __ mov(ebx, @@ -698,12 +805,12 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) { __ cmp(ebx, Operand(esi)); __ Check(equal, "Unexpected declaration in current context."); } - if (decl->mode() == Variable::CONST) { - __ mov(eax, Immediate(Factory::the_hole_value())); - __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax); + if (mode == Variable::CONST) { + __ mov(CodeGenerator::ContextOperand(esi, slot->index()), + Immediate(Factory::the_hole_value())); // No write barrier since the hole value is in old space. - } else if (decl->fun() != NULL) { - VisitForValue(decl->fun(), kAccumulator); + } else if (function != NULL) { + VisitForValue(function, kAccumulator); __ mov(CodeGenerator::ContextOperand(esi, slot->index()), result_register()); int offset = Context::SlotOffset(slot->index()); @@ -714,21 +821,19 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) { case Slot::LOOKUP: { __ push(esi); - __ push(Immediate(var->name())); + __ push(Immediate(variable->name())); // Declaration nodes are always introduced in one of two modes. - ASSERT(decl->mode() == Variable::VAR || - decl->mode() == Variable::CONST); - PropertyAttributes attr = - (decl->mode() == Variable::VAR) ? NONE : READ_ONLY; + ASSERT(mode == Variable::VAR || mode == Variable::CONST); + PropertyAttributes attr = (mode == Variable::VAR) ? NONE : READ_ONLY; __ push(Immediate(Smi::FromInt(attr))); // Push initial value, if any. // Note: For variables we must not push an initial value (such as // 'undefined') because we may have a (legal) redeclaration and we // must not destroy the current value. 
- if (decl->mode() == Variable::CONST) { + if (mode == Variable::CONST) { __ push(Immediate(Factory::the_hole_value())); - } else if (decl->fun() != NULL) { - VisitForValue(decl->fun(), kStack); + } else if (function != NULL) { + VisitForValue(function, kStack); } else { __ push(Immediate(Smi::FromInt(0))); // No initial value! } @@ -738,13 +843,13 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) { } } else if (prop != NULL) { - if (decl->fun() != NULL || decl->mode() == Variable::CONST) { + if (function != NULL || mode == Variable::CONST) { // We are declaring a function or constant that rewrites to a // property. Use (keyed) IC to set the initial value. VisitForValue(prop->obj(), kStack); - if (decl->fun() != NULL) { + if (function != NULL) { VisitForValue(prop->key(), kStack); - VisitForValue(decl->fun(), kAccumulator); + VisitForValue(function, kAccumulator); __ pop(ecx); } else { VisitForValue(prop->key(), kAccumulator); @@ -763,6 +868,11 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) { } +void FullCodeGenerator::VisitDeclaration(Declaration* decl) { + EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun()); +} + + void FullCodeGenerator::DeclareGlobals(Handle pairs) { // Call the runtime to declare the globals. __ push(esi); // The context is the first argument. @@ -773,19 +883,225 @@ void FullCodeGenerator::DeclareGlobals(Handle pairs) { } -void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { - Comment cmnt(masm_, "[ FunctionLiteral"); +void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { + Comment cmnt(masm_, "[ SwitchStatement"); + Breakable nested_statement(this, stmt); + SetStatementPosition(stmt); + // Keep the switch value on the stack until a case matches. + VisitForValue(stmt->tag(), kStack); + + ZoneList* clauses = stmt->cases(); + CaseClause* default_clause = NULL; // Can occur anywhere in the list. + + Label next_test; // Recycled for each test. + // Compile all the tests with branches to their bodies. + for (int i = 0; i < clauses->length(); i++) { + CaseClause* clause = clauses->at(i); + // The default is not a test, but remember it as final fall through. + if (clause->is_default()) { + default_clause = clause; + continue; + } + + Comment cmnt(masm_, "[ Case comparison"); + __ bind(&next_test); + next_test.Unuse(); + + // Compile the label expression. + VisitForValue(clause->label(), kAccumulator); + + // Perform the comparison as if via '==='. The comparison stub expects + // the smi vs. smi case to be handled before it is called. + Label slow_case; + __ mov(edx, Operand(esp, 0)); // Switch value. + __ mov(ecx, edx); + __ or_(ecx, Operand(eax)); + __ test(ecx, Immediate(kSmiTagMask)); + __ j(not_zero, &slow_case, not_taken); + __ cmp(edx, Operand(eax)); + __ j(not_equal, &next_test); + __ Drop(1); // Switch value is no longer needed. + __ jmp(clause->body_target()->entry_label()); + + __ bind(&slow_case); + CompareStub stub(equal, true); + __ CallStub(&stub); + __ test(eax, Operand(eax)); + __ j(not_equal, &next_test); + __ Drop(1); // Switch value is no longer needed. + __ jmp(clause->body_target()->entry_label()); + } - // Build the shared function info and instantiate the function based - // on it. - Handle function_info = - Compiler::BuildFunctionInfo(expr, script(), this); - if (HasStackOverflow()) return; + // Discard the test value and jump to the default if present, otherwise to + // the end of the statement. + __ bind(&next_test); + __ Drop(1); // Switch value is no longer needed. 
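The switch statement is lowered to a chain of strict-equality tests in source order: each clause's label is evaluated and compared against the saved switch value (with a smi-only fast path before falling back to the compare stub), and the default clause, wherever it appears in the source, is only reached after every test has failed. The resulting control flow behaves like this standalone model (an illustration, not V8 code):

    #include <cstdio>
    #include <vector>

    // Each non-default clause becomes one '===' test, taken in source order;
    // the default (here an index, -1 if absent) is the final fallback.
    int FindMatchingClause(int switch_value, const std::vector<int>& labels,
                           int default_index) {
      for (int i = 0; i < static_cast<int>(labels.size()); i++) {
        if (labels[i] == switch_value) return i;
      }
      return default_index;
    }

    int main() {
      std::printf("%d\n", FindMatchingClause(7, {1, 3, 7, 9}, -1));  // prints 2
    }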
+ if (default_clause == NULL) { + __ jmp(nested_statement.break_target()); + } else { + __ jmp(default_clause->body_target()->entry_label()); + } + + // Compile all the case bodies. + for (int i = 0; i < clauses->length(); i++) { + Comment cmnt(masm_, "[ Case body"); + CaseClause* clause = clauses->at(i); + __ bind(clause->body_target()->entry_label()); + VisitStatements(clause->statements()); + } + + __ bind(nested_statement.break_target()); +} + + +void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { + Comment cmnt(masm_, "[ ForInStatement"); + SetStatementPosition(stmt); + + Label loop, exit; + ForIn loop_statement(this, stmt); + increment_loop_depth(); + + // Get the object to enumerate over. Both SpiderMonkey and JSC + // ignore null and undefined in contrast to the specification; see + // ECMA-262 section 12.6.4. + VisitForValue(stmt->enumerable(), kAccumulator); + __ cmp(eax, Factory::undefined_value()); + __ j(equal, &exit); + __ cmp(eax, Factory::null_value()); + __ j(equal, &exit); + + // Convert the object to a JS object. + Label convert, done_convert; + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &convert); + __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx); + __ j(above_equal, &done_convert); + __ bind(&convert); + __ push(eax); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ bind(&done_convert); + __ push(eax); - // Create a new closure. - __ push(esi); - __ push(Immediate(function_info)); - __ CallRuntime(Runtime::kNewClosure, 2); + // TODO(kasperl): Check cache validity in generated code. This is a + // fast case for the JSObject::IsSimpleEnum cache validity + // checks. If we cannot guarantee cache validity, call the runtime + // system to check cache validity or get the property names in a + // fixed array. + + // Get the set of properties to enumerate. + __ push(eax); // Duplicate the enumerable object on the stack. + __ CallRuntime(Runtime::kGetPropertyNamesFast, 1); + + // If we got a map from the runtime call, we can do a fast + // modification check. Otherwise, we got a fixed array, and we have + // to do a slow check. + Label fixed_array; + __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::meta_map()); + __ j(not_equal, &fixed_array); + + // We got a map in register eax. Get the enumeration cache from it. + __ mov(ecx, FieldOperand(eax, Map::kInstanceDescriptorsOffset)); + __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset)); + __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset)); + + // Setup the four remaining stack slots. + __ push(eax); // Map. + __ push(edx); // Enumeration cache. + __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset)); + __ SmiTag(eax); + __ push(eax); // Enumeration cache length (as smi). + __ push(Immediate(Smi::FromInt(0))); // Initial index. + __ jmp(&loop); + + // We got a fixed array in register eax. Iterate through that. + __ bind(&fixed_array); + __ push(Immediate(Smi::FromInt(0))); // Map (0) - force slow check. + __ push(eax); + __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset)); + __ SmiTag(eax); + __ push(eax); // Fixed array length (as smi). + __ push(Immediate(Smi::FromInt(0))); // Initial index. + + // Generate code for doing the condition check. + __ bind(&loop); + __ mov(eax, Operand(esp, 0 * kPointerSize)); // Get the current index. + __ cmp(eax, Operand(esp, 1 * kPointerSize)); // Compare to the array length. + __ j(above_equal, loop_statement.break_target()); + + // Get the current entry of the array into register ebx. 
+ __ mov(ebx, Operand(esp, 2 * kPointerSize)); + __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize)); + + // Get the expected map from the stack or a zero map in the + // permanent slow case into register edx. + __ mov(edx, Operand(esp, 3 * kPointerSize)); + + // Check if the expected map still matches that of the enumerable. + // If not, we have to filter the key. + Label update_each; + __ mov(ecx, Operand(esp, 4 * kPointerSize)); + __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset)); + __ j(equal, &update_each); + + // Convert the entry to a string or null if it isn't a property + // anymore. If the property has been removed while iterating, we + // just skip it. + __ push(ecx); // Enumerable. + __ push(ebx); // Current entry. + __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION); + __ cmp(eax, Factory::null_value()); + __ j(equal, loop_statement.continue_target()); + __ mov(ebx, Operand(eax)); + + // Update the 'each' property or variable from the possibly filtered + // entry in register ebx. + __ bind(&update_each); + __ mov(result_register(), ebx); + // Perform the assignment as if via '='. + EmitAssignment(stmt->each()); + + // Generate code for the body of the loop. + Label stack_limit_hit, stack_check_done; + Visit(stmt->body()); + + __ StackLimitCheck(&stack_limit_hit); + __ bind(&stack_check_done); + + // Generate code for going to the next element by incrementing the + // index (smi) stored on top of the stack. + __ bind(loop_statement.continue_target()); + __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1))); + __ jmp(&loop); + + // Slow case for the stack limit check. + StackCheckStub stack_check_stub; + __ bind(&stack_limit_hit); + __ CallStub(&stack_check_stub); + __ jmp(&stack_check_done); + + // Remove the pointers stored on the stack. + __ bind(loop_statement.break_target()); + __ add(Operand(esp), Immediate(5 * kPointerSize)); + + // Exit and decrement the loop depth. + __ bind(&exit); + decrement_loop_depth(); +} + + +void FullCodeGenerator::EmitNewClosure(Handle info) { + // Use the fast case closure allocation code that allocates in new + // space for nested functions that don't need literals cloning. + if (scope()->is_function_scope() && info->num_literals() == 0) { + FastNewClosureStub stub; + __ push(Immediate(info)); + __ CallStub(&stub); + } else { + __ push(esi); + __ push(Immediate(info)); + __ CallRuntime(Runtime::kNewClosure, 2); + } Apply(context_, eax); } @@ -830,7 +1146,20 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var, Comment cmnt(masm_, (slot->type() == Slot::CONTEXT) ? "Context slot" : "Stack slot"); - Apply(context, slot); + if (var->mode() == Variable::CONST) { + // Constants may be the hole value if they have not been initialized. + // Unhole them. 
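// --- Editorial note, not part of the patch ---
// A const slot holds the sentinel "hole" value until its initializer has run,
// and a read that happens before then must observe 'undefined'. The unholing
// below is therefore equivalent to:
//   Object* value = slot;
//   if (value == the_hole) value = undefined;
//   <use value>;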
+ Label done; + MemOperand slot_operand = EmitSlotSearch(slot, eax); + __ mov(eax, slot_operand); + __ cmp(eax, Factory::the_hole_value()); + __ j(not_equal, &done); + __ mov(eax, Factory::undefined_value()); + __ bind(&done); + Apply(context, eax); + } else { + Apply(context, slot); + } } else { Comment cmnt(masm_, "Rewritten parameter"); @@ -966,22 +1295,28 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { Comment cmnt(masm_, "[ ArrayLiteral"); + + ZoneList* subexprs = expr->values(); + int length = subexprs->length(); + __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset)); __ push(Immediate(Smi::FromInt(expr->literal_index()))); __ push(Immediate(expr->constant_elements())); if (expr->depth() > 1) { __ CallRuntime(Runtime::kCreateArrayLiteral, 3); - } else { + } else if (length > FastCloneShallowArrayStub::kMaximumLength) { __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); + } else { + FastCloneShallowArrayStub stub(length); + __ CallStub(&stub); } bool result_saved = false; // Is the result saved to the stack? // Emit code to evaluate all the non-constant subexpressions and to store // them into the newly cloned array. - ZoneList* subexprs = expr->values(); - for (int i = 0, len = subexprs->length(); i < len; i++) { + for (int i = 0; i < length; i++) { Expression* subexpr = subexprs->at(i); // If the subexpression is a literal or a simple materialized literal it // is already set in the cloned array. @@ -1016,7 +1351,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { void FullCodeGenerator::VisitAssignment(Assignment* expr) { Comment cmnt(masm_, "[ Assignment"); - ASSERT(expr->op() != Token::INIT_CONST); + // Invalid left-hand sides are rewritten to have a 'throw ReferenceError' + // on the left-hand side. + if (!expr->target()->IsValidLeftHandSide()) { + VisitForEffect(expr->target()); + return; + } + // Left-hand side can only be a property, a global or a (parameter or local) // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY. enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; @@ -1095,6 +1436,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { switch (assign_type) { case VARIABLE: EmitVariableAssignment(expr->target()->AsVariableProxy()->var(), + expr->op(), context_); break; case NAMED_PROPERTY: @@ -1137,15 +1479,66 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op, } +void FullCodeGenerator::EmitAssignment(Expression* expr) { + // Invalid left-hand sides are rewritten to have a 'throw + // ReferenceError' on the left-hand side. + if (!expr->IsValidLeftHandSide()) { + VisitForEffect(expr); + return; + } + + // Left-hand side can only be a property, a global or a (parameter or local) + // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY. + enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; + LhsKind assign_type = VARIABLE; + Property* prop = expr->AsProperty(); + if (prop != NULL) { + assign_type = (prop->key()->IsPropertyName()) + ? NAMED_PROPERTY + : KEYED_PROPERTY; + } + + switch (assign_type) { + case VARIABLE: { + Variable* var = expr->AsVariableProxy()->var(); + EmitVariableAssignment(var, Token::ASSIGN, Expression::kEffect); + break; + } + case NAMED_PROPERTY: { + __ push(eax); // Preserve value. + VisitForValue(prop->obj(), kAccumulator); + __ mov(edx, eax); + __ pop(eax); // Restore value. 
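// --- Editorial note, not part of the patch ---
// The StoreIC call emitted below expects the value in eax, the receiver in edx
// and the property name in ecx; that is why the value computed above is parked
// on the stack while the receiver is evaluated, then restored into eax just
// before the IC is invoked.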
+ __ mov(ecx, prop->key()->AsLiteral()->handle()); + Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + __ call(ic, RelocInfo::CODE_TARGET); + __ nop(); // Signal no inlined code. + break; + } + case KEYED_PROPERTY: { + __ push(eax); // Preserve value. + VisitForValue(prop->obj(), kStack); + VisitForValue(prop->key(), kAccumulator); + __ mov(ecx, eax); + __ pop(edx); + __ pop(eax); // Restore value. + Handle ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + __ call(ic, RelocInfo::CODE_TARGET); + __ nop(); // Signal no inlined code. + break; + } + } +} + + void FullCodeGenerator::EmitVariableAssignment(Variable* var, + Token::Value op, Expression::Context context) { - // Three main cases: global variables, lookup slots, and all other - // types of slots. Left-hand-side parameters that rewrite to - // explicit property accesses do not reach here. + // Left-hand sides that rewrite to explicit property accesses do not reach + // here. ASSERT(var != NULL); ASSERT(var->is_global() || var->slot() != NULL); - Slot* slot = var->slot(); if (var->is_global()) { ASSERT(!var->is_this()); // Assignment to a global variable. Use inline caching for the @@ -1156,44 +1549,61 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Handle ic(Builtins::builtin(Builtins::StoreIC_Initialize)); __ call(ic, RelocInfo::CODE_TARGET); __ nop(); - Apply(context, eax); - } else if (slot != NULL && slot->type() == Slot::LOOKUP) { - __ push(result_register()); // Value. - __ push(esi); // Context. - __ push(Immediate(var->name())); - __ CallRuntime(Runtime::kStoreContextSlot, 3); - Apply(context, eax); - - } else if (slot != NULL) { + } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) { + // Perform the assignment for non-const variables and for initialization + // of const variables. Const assignments are simply skipped. + Label done; + Slot* slot = var->slot(); switch (slot->type()) { - case Slot::LOCAL: case Slot::PARAMETER: - __ mov(Operand(ebp, SlotOffset(slot)), result_register()); + case Slot::LOCAL: + if (op == Token::INIT_CONST) { + // Detect const reinitialization by checking for the hole value. + __ mov(edx, Operand(ebp, SlotOffset(slot))); + __ cmp(edx, Factory::the_hole_value()); + __ j(not_equal, &done); + } + // Perform the assignment. + __ mov(Operand(ebp, SlotOffset(slot)), eax); break; case Slot::CONTEXT: { MemOperand target = EmitSlotSearch(slot, ecx); - __ mov(target, result_register()); - - // RecordWrite may destroy all its register arguments. - __ mov(edx, result_register()); + if (op == Token::INIT_CONST) { + // Detect const reinitialization by checking for the hole value. + __ mov(edx, target); + __ cmp(edx, Factory::the_hole_value()); + __ j(not_equal, &done); + } + // Perform the assignment and issue the write barrier. + __ mov(target, eax); + // The value of the assignment is in eax. RecordWrite clobbers its + // register arguments. + __ mov(edx, eax); int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; __ RecordWrite(ecx, offset, edx, ebx); break; } case Slot::LOOKUP: - UNREACHABLE(); + // Call the runtime for the assignment. The runtime will ignore + // const reinitialization. + __ push(eax); // Value. + __ push(esi); // Context. + __ push(Immediate(var->name())); + if (op == Token::INIT_CONST) { + // The runtime will ignore const redeclaration. 
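// --- Editorial note, not part of the patch ---
// The INIT_CONST handling above boils down to "only write the slot while it
// still holds the hole":
//   if (op == Token::INIT_CONST && slot != the_hole) goto done;  // already initialized
//   slot = value;
// Ordinary assignments to a const never reach this code at all; they are
// skipped by the mode check at the top of the function.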
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); + } else { + __ CallRuntime(Runtime::kStoreContextSlot, 3); + } break; } - Apply(context, result_register()); - - } else { - // Variables rewritten as properties are not treated as variables in - // assignments. - UNREACHABLE(); + __ bind(&done); } + + Apply(context, eax); } @@ -1327,7 +1737,8 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) { } // Record source position for debugger. SetSourcePosition(expr->position()); - CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE); + InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; + CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE); __ CallStub(&stub); // Restore context register. __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); @@ -1341,16 +1752,62 @@ void FullCodeGenerator::VisitCall(Call* expr) { Variable* var = fun->AsVariableProxy()->AsVariable(); if (var != NULL && var->is_possibly_eval()) { - // Call to the identifier 'eval'. - UNREACHABLE(); + // In a call to eval, we first call %ResolvePossiblyDirectEval to + // resolve the function we need to call and the receiver of the + // call. Then we call the resolved function using the given + // arguments. + VisitForValue(fun, kStack); + __ push(Immediate(Factory::undefined_value())); // Reserved receiver slot. + + // Push the arguments. + ZoneList* args = expr->arguments(); + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + VisitForValue(args->at(i), kStack); + } + + // Push copy of the function - found below the arguments. + __ push(Operand(esp, (arg_count + 1) * kPointerSize)); + + // Push copy of the first argument or undefined if it doesn't exist. + if (arg_count > 0) { + __ push(Operand(esp, arg_count * kPointerSize)); + } else { + __ push(Immediate(Factory::undefined_value())); + } + + // Push the receiver of the enclosing function and do runtime call. + __ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize)); + __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3); + + // The runtime call returns a pair of values in eax (function) and + // edx (receiver). Touch up the stack with the right values. + __ mov(Operand(esp, (arg_count + 0) * kPointerSize), edx); + __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax); + + // Record source position for debugger. + SetSourcePosition(expr->position()); + InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; + CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE); + __ CallStub(&stub); + // Restore context register. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + DropAndApply(1, context_, eax); } else if (var != NULL && !var->is_this() && var->is_global()) { // Push global object as receiver for the call IC. __ push(CodeGenerator::GlobalObject()); EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT); } else if (var != NULL && var->slot() != NULL && var->slot()->type() == Slot::LOOKUP) { - // Call to a lookup slot. - UNREACHABLE(); + // Call to a lookup slot (dynamically introduced variable). Call the + // runtime to find the function to call (returned in eax) and the object + // holding it (returned in edx). + __ push(context_register()); + __ push(Immediate(var->name())); + __ CallRuntime(Runtime::kLoadContextSlot, 2); + __ push(eax); // Function. + __ push(edx); // Receiver. + EmitCallWithStub(expr); } else if (fun->AsProperty() != NULL) { // Call to an object property. 
Property* prop = fun->AsProperty(); @@ -1447,7 +1904,730 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) { } +void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) { + Handle name = expr->name(); + if (strcmp("_IsSmi", *name->ToCString()) == 0) { + EmitIsSmi(expr->arguments()); + } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) { + EmitIsNonNegativeSmi(expr->arguments()); + } else if (strcmp("_IsObject", *name->ToCString()) == 0) { + EmitIsObject(expr->arguments()); + } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) { + EmitIsUndetectableObject(expr->arguments()); + } else if (strcmp("_IsFunction", *name->ToCString()) == 0) { + EmitIsFunction(expr->arguments()); + } else if (strcmp("_IsArray", *name->ToCString()) == 0) { + EmitIsArray(expr->arguments()); + } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) { + EmitIsRegExp(expr->arguments()); + } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) { + EmitIsConstructCall(expr->arguments()); + } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) { + EmitObjectEquals(expr->arguments()); + } else if (strcmp("_Arguments", *name->ToCString()) == 0) { + EmitArguments(expr->arguments()); + } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) { + EmitArgumentsLength(expr->arguments()); + } else if (strcmp("_ClassOf", *name->ToCString()) == 0) { + EmitClassOf(expr->arguments()); + } else if (strcmp("_Log", *name->ToCString()) == 0) { + EmitLog(expr->arguments()); + } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) { + EmitRandomHeapNumber(expr->arguments()); + } else if (strcmp("_SubString", *name->ToCString()) == 0) { + EmitSubString(expr->arguments()); + } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) { + EmitRegExpExec(expr->arguments()); + } else if (strcmp("_ValueOf", *name->ToCString()) == 0) { + EmitValueOf(expr->arguments()); + } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) { + EmitSetValueOf(expr->arguments()); + } else if (strcmp("_NumberToString", *name->ToCString()) == 0) { + EmitNumberToString(expr->arguments()); + } else if (strcmp("_CharFromCode", *name->ToCString()) == 0) { + EmitCharFromCode(expr->arguments()); + } else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) { + EmitFastCharCodeAt(expr->arguments()); + } else if (strcmp("_StringAdd", *name->ToCString()) == 0) { + EmitStringAdd(expr->arguments()); + } else if (strcmp("_StringCompare", *name->ToCString()) == 0) { + EmitStringCompare(expr->arguments()); + } else if (strcmp("_MathPow", *name->ToCString()) == 0) { + EmitMathPow(expr->arguments()); + } else if (strcmp("_MathSin", *name->ToCString()) == 0) { + EmitMathSin(expr->arguments()); + } else if (strcmp("_MathCos", *name->ToCString()) == 0) { + EmitMathCos(expr->arguments()); + } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) { + EmitMathSqrt(expr->arguments()); + } else if (strcmp("_CallFunction", *name->ToCString()) == 0) { + EmitCallFunction(expr->arguments()); + } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) { + EmitRegExpConstructResult(expr->arguments()); + } else if (strcmp("_SwapElements", *name->ToCString()) == 0) { + EmitSwapElements(expr->arguments()); + } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) { + EmitGetFromCache(expr->arguments()); + } else { + UNREACHABLE(); + } +} + + +void FullCodeGenerator::EmitIsSmi(ZoneList* args) { + ASSERT(args->length() == 1); + + VisitForValue(args->at(0), kAccumulator); + + Label materialize_true, 
materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, if_true); + __ jmp(if_false); + + Apply(context_, if_true, if_false); +} + + +void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList* args) { + ASSERT(args->length() == 1); + + VisitForValue(args->at(0), kAccumulator); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + + __ test(eax, Immediate(kSmiTagMask | 0x80000000)); + __ j(zero, if_true); + __ jmp(if_false); + + Apply(context_, if_true, if_false); +} + + +void FullCodeGenerator::EmitIsObject(ZoneList* args) { + ASSERT(args->length() == 1); + + VisitForValue(args->at(0), kAccumulator); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, if_false); + __ cmp(eax, Factory::null_value()); + __ j(equal, if_true); + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + // Undetectable objects behave like undefined when tested with typeof. + __ movzx_b(ecx, FieldOperand(ebx, Map::kBitFieldOffset)); + __ test(ecx, Immediate(1 << Map::kIsUndetectable)); + __ j(not_zero, if_false); + __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset)); + __ cmp(ecx, FIRST_JS_OBJECT_TYPE); + __ j(below, if_false); + __ cmp(ecx, LAST_JS_OBJECT_TYPE); + __ j(below_equal, if_true); + __ jmp(if_false); + + Apply(context_, if_true, if_false); +} + + +void FullCodeGenerator::EmitIsUndetectableObject(ZoneList* args) { + ASSERT(args->length() == 1); + + VisitForValue(args->at(0), kAccumulator); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, if_false); + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset)); + __ test(ebx, Immediate(1 << Map::kIsUndetectable)); + __ j(not_zero, if_true); + __ jmp(if_false); + + Apply(context_, if_true, if_false); +} + + +void FullCodeGenerator::EmitIsFunction(ZoneList* args) { + ASSERT(args->length() == 1); + + VisitForValue(args->at(0), kAccumulator); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, if_false); + __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx); + __ j(equal, if_true); + __ jmp(if_false); + + Apply(context_, if_true, if_false); +} + + +void FullCodeGenerator::EmitIsArray(ZoneList* args) { + ASSERT(args->length() == 1); + + VisitForValue(args->at(0), kAccumulator); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + + __ test(eax, Immediate(kSmiTagMask)); + __ j(equal, if_false); + __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx); + __ j(equal, if_true); + __ jmp(if_false); + + Apply(context_, if_true, if_false); +} + + +void FullCodeGenerator::EmitIsRegExp(ZoneList* args) { + ASSERT(args->length() == 1); + + VisitForValue(args->at(0), kAccumulator); + + Label materialize_true, materialize_false; + 
Label* if_true = NULL; + Label* if_false = NULL; + PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + + __ test(eax, Immediate(kSmiTagMask)); + __ j(equal, if_false); + __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx); + __ j(equal, if_true); + __ jmp(if_false); + + Apply(context_, if_true, if_false); +} + + + +void FullCodeGenerator::EmitIsConstructCall(ZoneList* args) { + ASSERT(args->length() == 0); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + + // Get the frame pointer for the calling frame. + __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + + // Skip the arguments adaptor frame if it exists. + Label check_frame_marker; + __ cmp(Operand(eax, StandardFrameConstants::kContextOffset), + Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(not_equal, &check_frame_marker); + __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset)); + + // Check the marker in the calling frame. + __ bind(&check_frame_marker); + __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset), + Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); + __ j(equal, if_true); + __ jmp(if_false); + + Apply(context_, if_true, if_false); +} + + +void FullCodeGenerator::EmitObjectEquals(ZoneList* args) { + ASSERT(args->length() == 2); + + // Load the two objects into registers and perform the comparison. + VisitForValue(args->at(0), kStack); + VisitForValue(args->at(1), kAccumulator); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + + __ pop(ebx); + __ cmp(eax, Operand(ebx)); + __ j(equal, if_true); + __ jmp(if_false); + + Apply(context_, if_true, if_false); +} + + +void FullCodeGenerator::EmitArguments(ZoneList* args) { + ASSERT(args->length() == 1); + + // ArgumentsAccessStub expects the key in edx and the formal + // parameter count in eax. + VisitForValue(args->at(0), kAccumulator); + __ mov(edx, eax); + __ mov(eax, Immediate(Smi::FromInt(scope()->num_parameters()))); + ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); + __ CallStub(&stub); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitArgumentsLength(ZoneList* args) { + ASSERT(args->length() == 0); + + Label exit; + // Get the number of formal parameters. + __ Set(eax, Immediate(Smi::FromInt(scope()->num_parameters()))); + + // Check if the calling frame is an arguments adaptor frame. + __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset), + Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(not_equal, &exit); + + // Arguments adaptor case: Read the arguments length from the + // adaptor frame. + __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset)); + + __ bind(&exit); + if (FLAG_debug_code) __ AbortIfNotSmi(eax); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitClassOf(ZoneList* args) { + ASSERT(args->length() == 1); + Label done, null, function, non_function_constructor; + + VisitForValue(args->at(0), kAccumulator); + + // If the object is a smi, we return null. + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &null); + + // Check that the object is a JS object but take special care of JS + // functions to make sure they have 'Function' as their class. 
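// --- Editorial note, not part of the patch ---
// The classification below, written out as pseudo-C++:
//   if (is_smi(obj))                                return null;
//   if (instance_type(obj) < FIRST_JS_OBJECT_TYPE)  return null;        // not a JS object
//   if (instance_type(obj) == JS_FUNCTION_TYPE)     return "Function";
//   constructor = obj->map()->constructor();
//   if (!is_function(constructor))                  return "Object";
//   return constructor->shared()->instance_class_name();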
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ebx, FieldOperand(eax, Map::kInstanceTypeOffset)); + __ cmp(ebx, FIRST_JS_OBJECT_TYPE); + __ j(below, &null); + + // As long as JS_FUNCTION_TYPE is the last instance type and it is + // right after LAST_JS_OBJECT_TYPE, we can avoid checking for + // LAST_JS_OBJECT_TYPE. + ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); + ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); + __ cmp(ebx, JS_FUNCTION_TYPE); + __ j(equal, &function); + + // Check if the constructor in the map is a function. + __ mov(eax, FieldOperand(eax, Map::kConstructorOffset)); + __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx); + __ j(not_equal, &non_function_constructor); + + // eax now contains the constructor function. Grab the + // instance class name from there. + __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset)); + __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset)); + __ jmp(&done); + + // Functions have class 'Function'. + __ bind(&function); + __ mov(eax, Factory::function_class_symbol()); + __ jmp(&done); + + // Objects with a non-function constructor have class 'Object'. + __ bind(&non_function_constructor); + __ mov(eax, Factory::Object_symbol()); + __ jmp(&done); + + // Non-JS objects have class null. + __ bind(&null); + __ mov(eax, Factory::null_value()); + + // All done. + __ bind(&done); + + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitLog(ZoneList* args) { + // Conditionally generate a log call. + // Args: + // 0 (literal string): The type of logging (corresponds to the flags). + // This is used to determine whether or not to generate the log call. + // 1 (string): Format string. Access the string at argument index 2 + // with '%2s' (see Logger::LogRuntime for all the formats). + // 2 (array): Arguments to the format string. + ASSERT_EQ(args->length(), 3); +#ifdef ENABLE_LOGGING_AND_PROFILING + if (CodeGenerator::ShouldGenerateLog(args->at(0))) { + VisitForValue(args->at(1), kStack); + VisitForValue(args->at(2), kStack); + __ CallRuntime(Runtime::kLog, 2); + } +#endif + // Finally, we're expected to leave a value on the top of the stack. + __ mov(eax, Factory::undefined_value()); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitRandomHeapNumber(ZoneList* args) { + ASSERT(args->length() == 0); + + Label slow_allocate_heapnumber; + Label heapnumber_allocated; + + __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber); + __ jmp(&heapnumber_allocated); + + __ bind(&slow_allocate_heapnumber); + // To allocate a heap number, and ensure that it is not a smi, we + // call the runtime function FUnaryMinus on 0, returning the double + // -0.0. A new, distinct heap number is returned each time. + __ push(Immediate(Smi::FromInt(0))); + __ CallRuntime(Runtime::kNumberUnaryMinus, 1); + __ mov(edi, eax); + + __ bind(&heapnumber_allocated); + + __ PrepareCallCFunction(0, ebx); + __ CallCFunction(ExternalReference::random_uint32_function(), 0); + + // Convert 32 random bits in eax to 0.(32 random bits) in a double + // by computing: + // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). + // This is implemented on both SSE2 and FPU. + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope fscope(SSE2); + __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single. 
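// --- Editorial sketch, not part of the patch ---
// Both the SSE2 and the FPU paths below implement the same bit trick: place the
// 32 random bits in the low mantissa bits of the double 1.0 x 2^20 (bit pattern
// 0x4130000000000000), giving 2^20 + r * 2^-32, then subtract 1.0 x 2^20 to be
// left with r * 2^-32, a double in [0, 1). A standalone C++ equivalent:
#include <cstdint>
#include <cstring>

double RandomBitsToDouble(uint32_t random_bits) {
  uint64_t bits = 0x4130000000000000ULL | random_bits;  // 1.(20 zeros)(32 random bits) x 2^20
  double biased;
  std::memcpy(&biased, &bits, sizeof biased);
  return biased - 1048576.0;                            // minus 1.0 x 2^20
}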
+ __ movd(xmm1, Operand(ebx)); + __ movd(xmm0, Operand(eax)); + __ cvtss2sd(xmm1, xmm1); + __ pxor(xmm0, xmm1); + __ subsd(xmm0, xmm1); + __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0); + } else { + // 0x4130000000000000 is 1.0 x 2^20 as a double. + __ mov(FieldOperand(edi, HeapNumber::kExponentOffset), + Immediate(0x41300000)); + __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax); + __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset)); + __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0)); + __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset)); + __ fsubp(1); + __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset)); + } + __ mov(eax, edi); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitSubString(ZoneList* args) { + // Load the arguments on the stack and call the stub. + SubStringStub stub; + ASSERT(args->length() == 3); + VisitForValue(args->at(0), kStack); + VisitForValue(args->at(1), kStack); + VisitForValue(args->at(2), kStack); + __ CallStub(&stub); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitRegExpExec(ZoneList* args) { + // Load the arguments on the stack and call the stub. + RegExpExecStub stub; + ASSERT(args->length() == 4); + VisitForValue(args->at(0), kStack); + VisitForValue(args->at(1), kStack); + VisitForValue(args->at(2), kStack); + VisitForValue(args->at(3), kStack); + __ CallStub(&stub); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitValueOf(ZoneList* args) { + ASSERT(args->length() == 1); + + VisitForValue(args->at(0), kAccumulator); // Load the object. + + Label done; + // If the object is a smi return the object. + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &done); + // If the object is not a value type, return the object. + __ CmpObjectType(eax, JS_VALUE_TYPE, ebx); + __ j(not_equal, &done); + __ mov(eax, FieldOperand(eax, JSValue::kValueOffset)); + + __ bind(&done); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitMathPow(ZoneList* args) { + // Load the arguments on the stack and call the runtime function. + ASSERT(args->length() == 2); + VisitForValue(args->at(0), kStack); + VisitForValue(args->at(1), kStack); + __ CallRuntime(Runtime::kMath_pow, 2); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitSetValueOf(ZoneList* args) { + ASSERT(args->length() == 2); + + VisitForValue(args->at(0), kStack); // Load the object. + VisitForValue(args->at(1), kAccumulator); // Load the value. + __ pop(ebx); // eax = value. ebx = object. + + Label done; + // If the object is a smi, return the value. + __ test(ebx, Immediate(kSmiTagMask)); + __ j(zero, &done); + + // If the object is not a value type, return the value. + __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx); + __ j(not_equal, &done); + + // Store the value. + __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax); + // Update the write barrier. Save the value as it will be + // overwritten by the write barrier code and is needed afterward. + __ mov(edx, eax); + __ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx); + + __ bind(&done); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitNumberToString(ZoneList* args) { + ASSERT_EQ(args->length(), 1); + + // Load the argument on the stack and call the stub. 
+ VisitForValue(args->at(0), kStack); + + NumberToStringStub stub; + __ CallStub(&stub); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitCharFromCode(ZoneList* args) { + ASSERT(args->length() == 1); + + VisitForValue(args->at(0), kAccumulator); + + Label slow_case, done; + // Fast case of Heap::LookupSingleCharacterStringFromCode. + ASSERT(kSmiTag == 0); + ASSERT(kSmiShiftSize == 0); + ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); + __ test(eax, + Immediate(kSmiTagMask | + ((~String::kMaxAsciiCharCode) << kSmiTagSize))); + __ j(not_zero, &slow_case); + __ Set(ebx, Immediate(Factory::single_character_string_cache())); + ASSERT(kSmiTag == 0); + ASSERT(kSmiTagSize == 1); + ASSERT(kSmiShiftSize == 0); + // At this point code register contains smi tagged ascii char code. + __ mov(ebx, FieldOperand(ebx, + eax, times_half_pointer_size, + FixedArray::kHeaderSize)); + __ cmp(ebx, Factory::undefined_value()); + __ j(equal, &slow_case); + __ mov(eax, ebx); + __ jmp(&done); + + __ bind(&slow_case); + __ push(eax); + __ CallRuntime(Runtime::kCharFromCode, 1); + + __ bind(&done); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitFastCharCodeAt(ZoneList* args) { + // TODO(fsc): Port the complete implementation from the classic back-end. + // Move the undefined value into the result register, which will + // trigger the slow case. + __ Set(eax, Immediate(Factory::undefined_value())); + Apply(context_, eax); +} + +void FullCodeGenerator::EmitStringAdd(ZoneList* args) { + ASSERT_EQ(2, args->length()); + + VisitForValue(args->at(0), kStack); + VisitForValue(args->at(1), kStack); + + StringAddStub stub(NO_STRING_ADD_FLAGS); + __ CallStub(&stub); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitStringCompare(ZoneList* args) { + ASSERT_EQ(2, args->length()); + + VisitForValue(args->at(0), kStack); + VisitForValue(args->at(1), kStack); + + StringCompareStub stub; + __ CallStub(&stub); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitMathSin(ZoneList* args) { + // Load the argument on the stack and call the stub. + TranscendentalCacheStub stub(TranscendentalCache::SIN); + ASSERT(args->length() == 1); + VisitForValue(args->at(0), kStack); + __ CallStub(&stub); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitMathCos(ZoneList* args) { + // Load the argument on the stack and call the stub. + TranscendentalCacheStub stub(TranscendentalCache::COS); + ASSERT(args->length() == 1); + VisitForValue(args->at(0), kStack); + __ CallStub(&stub); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitMathSqrt(ZoneList* args) { + // Load the argument on the stack and call the runtime function. + ASSERT(args->length() == 1); + VisitForValue(args->at(0), kStack); + __ CallRuntime(Runtime::kMath_sqrt, 1); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitCallFunction(ZoneList* args) { + ASSERT(args->length() >= 2); + + int arg_count = args->length() - 2; // For receiver and function. + VisitForValue(args->at(0), kStack); // Receiver. + for (int i = 0; i < arg_count; i++) { + VisitForValue(args->at(i + 1), kStack); + } + VisitForValue(args->at(arg_count + 1), kAccumulator); // Function. + + // InvokeFunction requires function in edi. Move it in there. 
+ if (!result_register().is(edi)) __ mov(edi, result_register()); + ParameterCount count(arg_count); + __ InvokeFunction(edi, count, CALL_FUNCTION); + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitRegExpConstructResult(ZoneList* args) { + ASSERT(args->length() == 3); + VisitForValue(args->at(0), kStack); + VisitForValue(args->at(1), kStack); + VisitForValue(args->at(2), kStack); + __ CallRuntime(Runtime::kRegExpConstructResult, 3); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitSwapElements(ZoneList* args) { + ASSERT(args->length() == 3); + VisitForValue(args->at(0), kStack); + VisitForValue(args->at(1), kStack); + VisitForValue(args->at(2), kStack); + __ CallRuntime(Runtime::kSwapElements, 3); + Apply(context_, eax); +} + + +void FullCodeGenerator::EmitGetFromCache(ZoneList* args) { + ASSERT_EQ(2, args->length()); + + ASSERT_NE(NULL, args->at(0)->AsLiteral()); + int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value(); + + Handle jsfunction_result_caches( + Top::global_context()->jsfunction_result_caches()); + if (jsfunction_result_caches->length() <= cache_id) { + __ Abort("Attempt to use undefined cache."); + __ mov(eax, Factory::undefined_value()); + Apply(context_, eax); + return; + } + + VisitForValue(args->at(1), kAccumulator); + + Register key = eax; + Register cache = ebx; + Register tmp = ecx; + __ mov(cache, CodeGenerator::ContextOperand(esi, Context::GLOBAL_INDEX)); + __ mov(cache, + FieldOperand(cache, GlobalObject::kGlobalContextOffset)); + __ mov(cache, + CodeGenerator::ContextOperand( + cache, Context::JSFUNCTION_RESULT_CACHES_INDEX)); + __ mov(cache, + FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id))); + + Label done, not_found; + // tmp now holds finger offset as a smi. + ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + __ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset)); + __ cmp(key, CodeGenerator::FixedArrayElementOperand(cache, tmp)); + __ j(not_equal, ¬_found); + + __ mov(eax, CodeGenerator::FixedArrayElementOperand(cache, tmp, 1)); + __ jmp(&done); + + __ bind(¬_found); + // Call runtime to perform the lookup. + __ push(cache); + __ push(key); + __ CallRuntime(Runtime::kGetFromCache, 2); + + __ bind(&done); + Apply(context_, eax); +} + + void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { + Handle name = expr->name(); + if (name->length() > 0 && name->Get(0) == '_') { + Comment cmnt(masm_, "[ InlineRuntimeCall"); + EmitInlineRuntimeCall(expr); + return; + } + Comment cmnt(masm_, "[ CallRuntime"); ZoneList* args = expr->arguments(); @@ -1481,6 +2661,46 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { switch (expr->op()) { + case Token::DELETE: { + Comment cmnt(masm_, "[ UnaryOperation (DELETE)"); + Property* prop = expr->expression()->AsProperty(); + Variable* var = expr->expression()->AsVariableProxy()->AsVariable(); + if (prop == NULL && var == NULL) { + // Result of deleting non-property, non-variable reference is true. + // The subexpression may have side effects. + VisitForEffect(expr->expression()); + Apply(context_, true); + } else if (var != NULL && + !var->is_global() && + var->slot() != NULL && + var->slot()->type() != Slot::LOOKUP) { + // Result of deleting non-global, non-dynamic variables is false. + // The subexpression does not have side effects. + Apply(context_, false); + } else { + // Property or variable reference. 
Call the delete builtin with + // object and property name as arguments. + if (prop != NULL) { + VisitForValue(prop->obj(), kStack); + VisitForValue(prop->key(), kStack); + } else if (var->is_global()) { + __ push(CodeGenerator::GlobalObject()); + __ push(Immediate(var->name())); + } else { + // Non-global variable. Call the runtime to look up the context + // where the variable was introduced. + __ push(context_register()); + __ push(Immediate(var->name())); + __ CallRuntime(Runtime::kLookupContext, 2); + __ push(eax); + __ push(Immediate(var->name())); + } + __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); + Apply(context_, eax); + } + break; + } + case Token::VOID: { Comment cmnt(masm_, "[ UnaryOperation (VOID)"); VisitForEffect(expr->expression()); @@ -1521,33 +2741,15 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { case Token::NOT: { Comment cmnt(masm_, "[ UnaryOperation (NOT)"); - Label materialize_true, materialize_false, done; - // Initially assume a pure test context. Notice that the labels are - // swapped. - Label* if_true = false_label_; - Label* if_false = true_label_; - switch (context_) { - case Expression::kUninitialized: - UNREACHABLE(); - break; - case Expression::kEffect: - if_true = &done; - if_false = &done; - break; - case Expression::kValue: - if_true = &materialize_false; - if_false = &materialize_true; - break; - case Expression::kTest: - break; - case Expression::kValueTest: - if_false = &materialize_true; - break; - case Expression::kTestValue: - if_true = &materialize_false; - break; - } + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + + // Notice that the labels are swapped. + PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true); + VisitForControl(expr->expression(), if_true, if_false); + Apply(context_, if_false, if_true); // Labels swapped. break; } @@ -1643,6 +2845,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { Comment cmnt(masm_, "[ CountOperation"); + // Invalid left-hand sides are rewritten to have a 'throw ReferenceError' + // as the left-hand side. + if (!expr->expression()->IsValidLeftHandSide()) { + VisitForEffect(expr->expression()); + return; + } // Expression can only be a property, a global or a (parameter or local) // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY. @@ -1664,7 +2872,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { EmitVariableLoad(expr->expression()->AsVariableProxy()->var(), Expression::kValue); location_ = saved_location; - } else { + } else { // Reserve space for result of postfix operation. if (expr->is_postfix() && context_ != Expression::kEffect) { __ push(Immediate(Smi::FromInt(0))); @@ -1754,7 +2962,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { switch (assign_type) { case VARIABLE: if (expr->is_postfix()) { + // Perform the assignment as if via '='. EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(), + Token::ASSIGN, Expression::kEffect); // For all contexts except kEffect: We have the result on // top of the stack. @@ -1762,7 +2972,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { ApplyTOS(context_); } } else { + // Perform the assignment as if via '='. 
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(), + Token::ASSIGN, context_); } break; @@ -1840,36 +3052,41 @@ void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { } +void FullCodeGenerator::EmitNullCompare(bool strict, + Register obj, + Register null_const, + Label* if_true, + Label* if_false, + Register scratch) { + __ cmp(obj, Operand(null_const)); + if (strict) { + __ j(equal, if_true); + } else { + __ j(equal, if_true); + __ cmp(obj, Factory::undefined_value()); + __ j(equal, if_true); + __ test(obj, Immediate(kSmiTagMask)); + __ j(zero, if_false); + // It can be an undetectable object. + __ mov(scratch, FieldOperand(obj, HeapObject::kMapOffset)); + __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset)); + __ test(scratch, Immediate(1 << Map::kIsUndetectable)); + __ j(not_zero, if_true); + } + __ jmp(if_false); +} + + void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Comment cmnt(masm_, "[ CompareOperation"); // Always perform the comparison for its control flow. Pack the result // into the expression's context after the comparison is performed. - Label materialize_true, materialize_false, done; - // Initially assume we are in a test context. - Label* if_true = true_label_; - Label* if_false = false_label_; - switch (context_) { - case Expression::kUninitialized: - UNREACHABLE(); - break; - case Expression::kEffect: - if_true = &done; - if_false = &done; - break; - case Expression::kValue: - if_true = &materialize_true; - if_false = &materialize_false; - break; - case Expression::kTest: - break; - case Expression::kValueTest: - if_true = &materialize_true; - break; - case Expression::kTestValue: - if_false = &materialize_false; - break; - } + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); VisitForValue(expr->left(), kStack); switch (expr->op()) { @@ -1899,10 +3116,24 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { case Token::EQ_STRICT: strict = true; // Fall through - case Token::EQ: + case Token::EQ: { cc = equal; __ pop(edx); + // If either operand is constant null we do a fast compare + // against null. 
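// --- Editorial note, not part of the patch ---
// EmitNullCompare above encodes the JavaScript comparison semantics:
//   obj === null  is true only for null itself;
//   obj == null   is true for null, undefined, and "undetectable" objects
//                 (host objects that masquerade as undefined); smis and every
//                 other value compare false.
// The fast path below applies it whenever one side of an (==, ===) comparison
// is the literal null.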
+ Literal* right_literal = expr->right()->AsLiteral(); + Literal* left_literal = expr->left()->AsLiteral(); + if (right_literal != NULL && right_literal->handle()->IsNull()) { + EmitNullCompare(strict, edx, eax, if_true, if_false, ecx); + Apply(context_, if_true, if_false); + return; + } else if (left_literal != NULL && left_literal->handle()->IsNull()) { + EmitNullCompare(strict, eax, edx, if_true, if_false, ecx); + Apply(context_, if_true, if_false); + return; + } break; + } case Token::LT: cc = less; __ pop(edx); @@ -2012,3 +3243,5 @@ void FullCodeGenerator::ExitFinallyBlock() { #undef __ } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index bc7a33c6cc..644d20072e 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_IA32) + #include "codegen-inl.h" #include "ic-inl.h" #include "runtime.h" @@ -868,7 +870,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { // ecx: key (a smi) // edx: receiver // edi: FixedArray receiver->elements - __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax); + __ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax); // Update write barrier for the elements array address. __ mov(edx, Operand(eax)); __ RecordWrite(edi, 0, edx, ecx); @@ -1643,3 +1645,5 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/jump-target-ia32.cc b/deps/v8/src/ia32/jump-target-ia32.cc index cba6508031..76c0d02d4f 100644 --- a/deps/v8/src/ia32/jump-target-ia32.cc +++ b/deps/v8/src/ia32/jump-target-ia32.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_IA32) + #include "codegen-inl.h" #include "jump-target-inl.h" #include "register-allocator-inl.h" @@ -431,3 +433,5 @@ void BreakTarget::Bind(Result* arg) { } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index a7d2834520..ba2fe2dd4e 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_IA32) + #include "bootstrapper.h" #include "codegen-inl.h" #include "debug.h" @@ -1706,3 +1708,5 @@ CodePatcher::~CodePatcher() { } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc index fdf3b9febb..b0de82752b 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc @@ -26,6 +26,9 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "v8.h" + +#if defined(V8_TARGET_ARCH_IA32) + #include "unicode.h" #include "log.h" #include "ast.h" @@ -51,7 +54,7 @@ namespace internal { * - esp : points to tip of C stack. * - ecx : points to tip of backtrack stack * - * The registers eax, ebx and ecx are free to use for computations. + * The registers eax and ebx are free to use for computations. * * Each call to a public method should retain this convention. * The stack will have the following structure: @@ -72,8 +75,6 @@ namespace internal { * - backup of caller ebx * - Offset of location before start of input (effectively character * position -1). Used to initialize capture registers to a non-position. 
- * - Boolean at start (if 1, we are starting at the start of the string, - * otherwise 0) * - register 0 ebp[-4] (Only positions must be stored in the first * - register 1 ebp[-8] num_saved_registers_ registers) * - ... @@ -178,8 +179,8 @@ void RegExpMacroAssemblerIA32::CheckCharacterGT(uc16 limit, Label* on_greater) { void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) { Label not_at_start; // Did we start the match at the start of the string at all? - __ cmp(Operand(ebp, kAtStart), Immediate(0)); - BranchOrBacktrack(equal, ¬_at_start); + __ cmp(Operand(ebp, kStartIndex), Immediate(0)); + BranchOrBacktrack(not_equal, ¬_at_start); // If we did, are we still at the start of the input? __ lea(eax, Operand(esi, edi, times_1, 0)); __ cmp(eax, Operand(ebp, kInputStart)); @@ -190,8 +191,8 @@ void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) { void RegExpMacroAssemblerIA32::CheckNotAtStart(Label* on_not_at_start) { // Did we start the match at the start of the string at all? - __ cmp(Operand(ebp, kAtStart), Immediate(0)); - BranchOrBacktrack(equal, on_not_at_start); + __ cmp(Operand(ebp, kStartIndex), Immediate(0)); + BranchOrBacktrack(not_equal, on_not_at_start); // If we did, are we still at the start of the input? __ lea(eax, Operand(esi, edi, times_1, 0)); __ cmp(eax, Operand(ebp, kInputStart)); @@ -209,6 +210,15 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector str, int cp_offset, Label* on_failure, bool check_end_of_string) { +#ifdef DEBUG + // If input is ASCII, don't even bother calling here if the string to + // match contains a non-ascii character. + if (mode_ == ASCII) { + for (int i = 0; i < str.length(); i++) { + ASSERT(str[i] <= String::kMaxAsciiCharCodeU); + } + } +#endif int byte_length = str.length() * char_size(); int byte_offset = cp_offset * char_size(); if (check_end_of_string) { @@ -222,14 +232,56 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector str, on_failure = &backtrack_label_; } - for (int i = 0; i < str.length(); i++) { + // Do one character test first to minimize loading for the case that + // we don't match at all (loading more than one character introduces that + // chance of reading unaligned and reading across cache boundaries). + // If the first character matches, expect a larger chance of matching the + // string, and start loading more characters at a time. + if (mode_ == ASCII) { + __ cmpb(Operand(esi, edi, times_1, byte_offset), + static_cast(str[0])); + } else { + // Don't use 16-bit immediate. The size changing prefix throws off + // pre-decoding. 
+ __ movzx_w(eax, + Operand(esi, edi, times_1, byte_offset)); + __ cmp(eax, static_cast(str[0])); + } + BranchOrBacktrack(not_equal, on_failure); + + __ lea(ebx, Operand(esi, edi, times_1, 0)); + for (int i = 1, n = str.length(); i < n;) { if (mode_ == ASCII) { - __ cmpb(Operand(esi, edi, times_1, byte_offset + i), - static_cast(str[i])); + if (i <= n - 4) { + int combined_chars = + (static_cast(str[i + 0]) << 0) | + (static_cast(str[i + 1]) << 8) | + (static_cast(str[i + 2]) << 16) | + (static_cast(str[i + 3]) << 24); + __ cmp(Operand(ebx, byte_offset + i), Immediate(combined_chars)); + i += 4; + } else { + __ cmpb(Operand(ebx, byte_offset + i), + static_cast(str[i])); + i += 1; + } } else { ASSERT(mode_ == UC16); - __ cmpw(Operand(esi, edi, times_1, byte_offset + i * sizeof(uc16)), - Immediate(str[i])); + if (i <= n - 2) { + __ cmp(Operand(ebx, byte_offset + i * sizeof(uc16)), + Immediate(*reinterpret_cast(&str[i]))); + i += 2; + } else { + // Avoid a 16-bit immediate operation. It uses the length-changing + // 0x66 prefix which causes pre-decoder misprediction and pipeline + // stalls. See + // "Intel(R) 64 and IA-32 Architectures Optimization Reference Manual" + // (248966.pdf) section 3.4.2.3 "Length-Changing Prefixes (LCP)" + __ movzx_w(eax, + Operand(ebx, byte_offset + i * sizeof(uc16))); + __ cmp(eax, static_cast(str[i])); + i += 1; + } } BranchOrBacktrack(not_equal, on_failure); } @@ -625,7 +677,6 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { __ push(edi); __ push(ebx); // Callee-save on MacOS. __ push(Immediate(0)); // Make room for "input start - 1" constant. - __ push(Immediate(0)); // Make room for "at start" constant. // Check if we have space on the stack for registers. Label stack_limit_hit; @@ -677,14 +728,6 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { // position registers. __ mov(Operand(ebp, kInputStartMinusOne), eax); - // Determine whether the start index is zero, that is at the start of the - // string, and store that value in a local variable. - __ xor_(Operand(ecx), ecx); // setcc only operates on cl (lower byte of ecx). - // Register ebx still holds -stringIndex. - __ test(ebx, Operand(ebx)); - __ setcc(zero, ecx); // 1 if 0 (start of string), 0 if positive. - __ mov(Operand(ebp, kAtStart), ecx); - if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. // Fill saved registers with initial value = start offset - 1 // Fill in stack push order, to avoid accessing across an unwritten @@ -712,8 +755,8 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd)); // Load previous char as initial value of current-character. Label at_start; - __ cmp(Operand(ebp, kAtStart), Immediate(0)); - __ j(not_equal, &at_start); + __ cmp(Operand(ebp, kStartIndex), Immediate(0)); + __ j(equal, &at_start); LoadCurrentCharacterUnchecked(-1, 1); // Load previous char. 
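// --- Editorial sketch, not part of the patch ---
// The CheckCharacters change above compares up to four ASCII pattern characters
// per instruction: on the little-endian ia32 target, four consecutive bytes of
// the pattern, packed low byte first, form the 32-bit immediate that a single
// dword compare can test against the subject string. Standalone equivalent of
// the packing:
#include <cstdint>

uint32_t PackFourAsciiChars(const unsigned char* s) {
  return static_cast<uint32_t>(s[0])
       | (static_cast<uint32_t>(s[1]) << 8)
       | (static_cast<uint32_t>(s[2]) << 16)
       | (static_cast<uint32_t>(s[3]) << 24);
}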
__ jmp(&start_label_); __ bind(&at_start); @@ -1201,3 +1244,5 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset, #endif // V8_INTERPRETED_REGEXP }} // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h index 823bc03312..8b8eeed6ce 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h @@ -132,9 +132,8 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler { static const int kBackup_edi = kBackup_esi - kPointerSize; static const int kBackup_ebx = kBackup_edi - kPointerSize; static const int kInputStartMinusOne = kBackup_ebx - kPointerSize; - static const int kAtStart = kInputStartMinusOne - kPointerSize; // First register address. Following registers are below it on the stack. - static const int kRegisterZero = kAtStart - kPointerSize; + static const int kRegisterZero = kInputStartMinusOne - kPointerSize; // Initial size of code buffer. static const size_t kRegExpCodeSize = 1024; diff --git a/deps/v8/src/ia32/register-allocator-ia32.cc b/deps/v8/src/ia32/register-allocator-ia32.cc index 73fefb3bbf..d840c0cc5c 100644 --- a/deps/v8/src/ia32/register-allocator-ia32.cc +++ b/deps/v8/src/ia32/register-allocator-ia32.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_IA32) + #include "codegen-inl.h" #include "register-allocator-inl.h" #include "virtual-frame-inl.h" @@ -151,3 +153,5 @@ Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() { } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index 189c0e4d16..d7b05cf40b 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_IA32) + #include "ic-inl.h" #include "codegen-inl.h" #include "stub-cache.h" @@ -2387,3 +2389,5 @@ Object* ConstructStubCompiler::CompileConstructStub( #undef __ } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc index 10aaa52b83..e22df6ec28 100644 --- a/deps/v8/src/ia32/virtual-frame-ia32.cc +++ b/deps/v8/src/ia32/virtual-frame-ia32.cc @@ -27,6 +27,8 @@ #include "v8.h" +#if defined(V8_TARGET_ARCH_IA32) + #include "codegen-inl.h" #include "register-allocator-inl.h" #include "scopes.h" @@ -1310,3 +1312,5 @@ void VirtualFrame::Push(Expression* expr) { #undef __ } } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h index 14fe4662dc..a8f23b0cc8 100644 --- a/deps/v8/src/ia32/virtual-frame-ia32.h +++ b/deps/v8/src/ia32/virtual-frame-ia32.h @@ -28,9 +28,10 @@ #ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_ #define V8_IA32_VIRTUAL_FRAME_IA32_H_ -#include "type-info.h" +#include "codegen.h" #include "register-allocator.h" #include "scopes.h" +#include "type-info.h" namespace v8 { namespace internal { @@ -97,23 +98,16 @@ class VirtualFrame: public ZoneObject { return register_locations_[num]; } - int register_location(Register reg) { - return register_locations_[RegisterAllocator::ToNumber(reg)]; - } + inline int register_location(Register reg); - void set_register_location(Register reg, int index) { - register_locations_[RegisterAllocator::ToNumber(reg)] = index; - } + inline void set_register_location(Register reg, int 
index); bool is_used(int num) { ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters); return register_locations_[num] != kIllegalIndex; } - bool is_used(Register reg) { - return register_locations_[RegisterAllocator::ToNumber(reg)] - != kIllegalIndex; - } + inline bool is_used(Register reg); // Add extra in-memory elements to the top of the frame to match an actual // frame (eg, the frame after an exception handler is pushed). No code is @@ -150,6 +144,9 @@ class VirtualFrame: public ZoneObject { // (ie, they all have frame-external references). Register SpillAnyRegister(); + // Spill the top element of the frame. + void SpillTop() { SpillElementAt(element_count() - 1); } + // Sync the range of elements in [begin, end] with memory. void SyncRange(int begin, int end); @@ -217,10 +214,7 @@ class VirtualFrame: public ZoneObject { void SetElementAt(int index, Result* value); // Set a frame element to a constant. The index is frame-top relative. - void SetElementAt(int index, Handle value) { - Result temp(value); - SetElementAt(index, &temp); - } + inline void SetElementAt(int index, Handle value); void PushElementAt(int index) { PushFrameSlotAt(element_count() - index - 1); @@ -315,10 +309,7 @@ class VirtualFrame: public ZoneObject { // Call stub given the number of arguments it expects on (and // removes from) the stack. - Result CallStub(CodeStub* stub, int arg_count) { - PrepareForCall(arg_count, arg_count); - return RawCallStub(stub); - } + inline Result CallStub(CodeStub* stub, int arg_count); // Call stub that takes a single argument passed in eax. The // argument is given as a result which does not have to be eax or @@ -361,7 +352,7 @@ class VirtualFrame: public ZoneObject { Result CallStoreIC(Handle name, bool is_contextual); // Call keyed store IC. Value, key, and receiver are found on top - // of the frame. Key and receiver are not dropped. + // of the frame. All three are dropped. Result CallKeyedStoreIC(); // Call call IC. Function name, arguments, and receiver are found on top @@ -473,12 +464,9 @@ class VirtualFrame: public ZoneObject { int register_locations_[RegisterAllocator::kNumRegisters]; // The number of frame-allocated locals and parameters respectively. - int parameter_count() { - return cgen()->scope()->num_parameters(); - } - int local_count() { - return cgen()->scope()->num_stack_slots(); - } + inline int parameter_count(); + + inline int local_count(); // The index of the element that is at the processor's frame pointer // (the ebp register). The parameters, receiver, and return address diff --git a/deps/v8/src/jump-target-heavy.cc b/deps/v8/src/jump-target-heavy.cc index 85620a2d96..468cf4a542 100644 --- a/deps/v8/src/jump-target-heavy.cc +++ b/deps/v8/src/jump-target-heavy.cc @@ -35,6 +35,9 @@ namespace v8 { namespace internal { +bool JumpTarget::compiling_deferred_code_ = false; + + void JumpTarget::Jump(Result* arg) { ASSERT(cgen()->has_valid_frame()); @@ -360,4 +363,64 @@ DeferredCode::DeferredCode() } } + +void JumpTarget::Unuse() { + reaching_frames_.Clear(); + merge_labels_.Clear(); + entry_frame_ = NULL; + entry_label_.Unuse(); +} + + +void JumpTarget::AddReachingFrame(VirtualFrame* frame) { + ASSERT(reaching_frames_.length() == merge_labels_.length()); + ASSERT(entry_frame_ == NULL); + Label fresh; + merge_labels_.Add(fresh); + reaching_frames_.Add(frame); +} + + +// ------------------------------------------------------------------------- +// BreakTarget implementation. 
+ +void BreakTarget::set_direction(Directionality direction) { + JumpTarget::set_direction(direction); + ASSERT(cgen()->has_valid_frame()); + expected_height_ = cgen()->frame()->height(); +} + + +void BreakTarget::CopyTo(BreakTarget* destination) { + ASSERT(destination != NULL); + destination->direction_ = direction_; + destination->reaching_frames_.Rewind(0); + destination->reaching_frames_.AddAll(reaching_frames_); + destination->merge_labels_.Rewind(0); + destination->merge_labels_.AddAll(merge_labels_); + destination->entry_frame_ = entry_frame_; + destination->entry_label_ = entry_label_; + destination->expected_height_ = expected_height_; +} + + +void BreakTarget::Branch(Condition cc, Hint hint) { + ASSERT(cgen()->has_valid_frame()); + + int count = cgen()->frame()->height() - expected_height_; + if (count > 0) { + // We negate and branch here rather than using DoBranch's negate + // and branch. This gives us a hook to remove statement state + // from the frame. + JumpTarget fall_through; + // Branch to fall through will not negate, because it is a + // forward-only target. + fall_through.Branch(NegateCondition(cc), NegateHint(hint)); + Jump(); // May emit merge code here. + fall_through.Bind(); + } else { + DoBranch(cc, hint); + } +} + } } // namespace v8::internal diff --git a/deps/v8/src/jump-target-heavy.h b/deps/v8/src/jump-target-heavy.h new file mode 100644 index 0000000000..b923fe57f3 --- /dev/null +++ b/deps/v8/src/jump-target-heavy.h @@ -0,0 +1,242 @@ +// Copyright 2008 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_JUMP_TARGET_HEAVY_H_ +#define V8_JUMP_TARGET_HEAVY_H_ + +#include "macro-assembler.h" +#include "zone-inl.h" + +namespace v8 { +namespace internal { + +// Forward declarations. +class FrameElement; +class Result; +class VirtualFrame; + +// ------------------------------------------------------------------------- +// Jump targets +// +// A jump target is an abstraction of a basic-block entry in generated +// code. 
It collects all the virtual frames reaching the block by +// forward jumps and pairs them with labels for the merge code along +// all forward-reaching paths. When bound, an expected frame for the +// block is determined and code is generated to merge to the expected +// frame. For backward jumps, the merge code is generated at the edge +// leaving the predecessor block. +// +// A jump target must have been reached via control flow (either by +// jumping, branching, or falling through) at the time it is bound. +// In particular, this means that at least one of the control-flow +// graph edges reaching the target must be a forward edge. + +class JumpTarget : public ZoneObject { // Shadows are dynamically allocated. + public: + // Forward-only jump targets can only be reached by forward CFG edges. + enum Directionality { FORWARD_ONLY, BIDIRECTIONAL }; + + // Construct a jump target used to generate code and to provide + // access to a current frame. + explicit JumpTarget(Directionality direction) + : direction_(direction), + reaching_frames_(0), + merge_labels_(0), + entry_frame_(NULL) { + } + + // Construct a jump target. + JumpTarget() + : direction_(FORWARD_ONLY), + reaching_frames_(0), + merge_labels_(0), + entry_frame_(NULL) { + } + + virtual ~JumpTarget() {} + + // Set the direction of the jump target. + virtual void set_direction(Directionality direction) { + direction_ = direction; + } + + // Treat the jump target as a fresh one. The state is reset. + void Unuse(); + + inline CodeGenerator* cgen(); + + Label* entry_label() { return &entry_label_; } + + VirtualFrame* entry_frame() const { return entry_frame_; } + void set_entry_frame(VirtualFrame* frame) { + entry_frame_ = frame; + } + + // Predicates testing the state of the encapsulated label. + bool is_bound() const { return entry_label_.is_bound(); } + bool is_linked() const { + return !is_bound() && !reaching_frames_.is_empty(); + } + bool is_unused() const { + // This is !is_bound() && !is_linked(). + return !is_bound() && reaching_frames_.is_empty(); + } + + // Emit a jump to the target. There must be a current frame at the + // jump and there will be no current frame after the jump. + virtual void Jump(); + virtual void Jump(Result* arg); + + // Emit a conditional branch to the target. There must be a current + // frame at the branch. The current frame will fall through to the + // code after the branch. The arg is a result that is live both at + // the target and the fall-through. + virtual void Branch(Condition cc, Hint hint = no_hint); + virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint); + virtual void Branch(Condition cc, + Result* arg0, + Result* arg1, + Hint hint = no_hint); + + // Bind a jump target. If there is no current frame at the binding + // site, there must be at least one frame reaching via a forward + // jump. + virtual void Bind(); + virtual void Bind(Result* arg); + virtual void Bind(Result* arg0, Result* arg1); + + // Emit a call to a jump target. There must be a current frame at + // the call. The frame at the target is the same as the current + // frame except for an extra return address on top of it. The frame + // after the call is the same as the frame before the call. + void Call(); + + static void set_compiling_deferred_code(bool flag) { + compiling_deferred_code_ = flag; + } + + protected: + // Directionality flag set at initialization time. + Directionality direction_; + + // A list of frames reaching this block via forward jumps. 
+ ZoneList<VirtualFrame*> reaching_frames_; + + // A parallel list of labels for merge code. + ZoneList