
Merge branch 'master' into net2

v0.7.4-release
Ryan Dahl, 2010
parent commit a97fdf5c39
Changed files (change counts in parentheses):

  1. .gitignore (1)
  2. AUTHORS (54)
  3. ChangeLog (43)
  4. Makefile (9)
  5. benchmark/http_simple.js (2)
  6. benchmark/static_http_server.js (2)
  7. deps/v8/AUTHORS (2)
  8. deps/v8/ChangeLog (14)
  9. deps/v8/SConstruct (35)
 10. deps/v8/include/v8.h (122)
 11. deps/v8/samples/lineprocessor.cc (2)
 12. deps/v8/src/SConscript (20)
 13. deps/v8/src/accessors.cc (37)
 14. deps/v8/src/api.cc (164)
 15. deps/v8/src/arm/assembler-arm.cc (191)
 16. deps/v8/src/arm/assembler-arm.h (9)
 17. deps/v8/src/arm/assembler-thumb2-inl.h (30)
 18. deps/v8/src/arm/assembler-thumb2.cc (227)
 19. deps/v8/src/arm/assembler-thumb2.h (25)
 20. deps/v8/src/arm/builtins-arm.cc (143)
 21. deps/v8/src/arm/codegen-arm.cc (902)
 22. deps/v8/src/arm/codegen-arm.h (100)
 23. deps/v8/src/arm/debug-arm.cc (10)
 24. deps/v8/src/arm/disasm-arm.cc (25)
 25. deps/v8/src/arm/fast-codegen-arm.cc (188)
 26. deps/v8/src/arm/full-codegen-arm.cc (182)
 27. deps/v8/src/arm/ic-arm.cc (214)
 28. deps/v8/src/arm/macro-assembler-arm.cc (264)
 29. deps/v8/src/arm/macro-assembler-arm.h (127)
 30. deps/v8/src/arm/simulator-arm.cc (29)
 31. deps/v8/src/arm/stub-cache-arm.cc (467)
 32. deps/v8/src/arm/virtual-frame-arm.cc (12)
 33. deps/v8/src/arm/virtual-frame-arm.h (9)
 34. deps/v8/src/array.js (5)
 35. deps/v8/src/assembler.cc (10)
 36. deps/v8/src/assembler.h (16)
 37. deps/v8/src/ast.h (39)
 38. deps/v8/src/bootstrapper.cc (177)
 39. deps/v8/src/bootstrapper.h (3)
 40. deps/v8/src/builtins.cc (499)
 41. deps/v8/src/builtins.h (10)
 42. deps/v8/src/checks.h (4)
 43. deps/v8/src/code-stubs.cc (16)
 44. deps/v8/src/code-stubs.h (5)
 45. deps/v8/src/codegen-inl.h (41)
 46. deps/v8/src/codegen.cc (75)
 47. deps/v8/src/codegen.h (41)
 48. deps/v8/src/compiler.cc (145)
 49. deps/v8/src/compiler.h (129)
 50. deps/v8/src/contexts.h (1)
 51. deps/v8/src/d8-readline.cc (4)
 52. deps/v8/src/data-flow.cc (318)
 53. deps/v8/src/data-flow.h (62)
 54. deps/v8/src/debug-delay.js (8)
 55. deps/v8/src/debug.cc (14)
 56. deps/v8/src/disassembler.cc (2)
 57. deps/v8/src/execution.cc (2)
 58. deps/v8/src/fast-codegen.cc (225)
 59. deps/v8/src/fast-codegen.h (94)
 60. deps/v8/src/flag-definitions.h (4)
 61. deps/v8/src/frame-element.cc (4)
 62. deps/v8/src/frame-element.h (43)
 63. deps/v8/src/frames-inl.h (2)
 64. deps/v8/src/frames.cc (7)
 65. deps/v8/src/full-codegen.cc (105)
 66. deps/v8/src/full-codegen.h (23)
 67. deps/v8/src/globals.h (5)
 68. deps/v8/src/handles.cc (47)
 69. deps/v8/src/handles.h (2)
 70. deps/v8/src/heap.cc (19)
 71. deps/v8/src/heap.h (2)
 72. deps/v8/src/ia32/assembler-ia32.cc (59)
 73. deps/v8/src/ia32/assembler-ia32.h (5)
 74. deps/v8/src/ia32/builtins-ia32.cc (135)
 75. deps/v8/src/ia32/codegen-ia32.cc (1644)
 76. deps/v8/src/ia32/codegen-ia32.h (161)
 77. deps/v8/src/ia32/debug-ia32.cc (3)
 78. deps/v8/src/ia32/disasm-ia32.cc (1)
 79. deps/v8/src/ia32/fast-codegen-ia32.cc (195)
 80. deps/v8/src/ia32/full-codegen-ia32.cc (194)
 81. deps/v8/src/ia32/ic-ia32.cc (462)
 82. deps/v8/src/ia32/macro-assembler-ia32.cc (149)
 83. deps/v8/src/ia32/macro-assembler-ia32.h (57)
 84. deps/v8/src/ia32/stub-cache-ia32.cc (634)
 85. deps/v8/src/ia32/virtual-frame-ia32.cc (107)
 86. deps/v8/src/ia32/virtual-frame-ia32.h (23)
 87. deps/v8/src/ic.cc (94)
 88. deps/v8/src/ic.h (32)
 89. deps/v8/src/json-delay.js (36)
 90. deps/v8/src/jump-target-inl.h (3)
 91. deps/v8/src/jump-target.cc (50)
 92. deps/v8/src/liveedit.cc (87)
 93. deps/v8/src/liveedit.h (78)
 94. deps/v8/src/log-utils.cc (9)
 95. deps/v8/src/log-utils.h (3)
 96. deps/v8/src/log.cc (98)
 97. deps/v8/src/log.h (17)
 98. deps/v8/src/macro-assembler.h (9)
 99. deps/v8/src/math.js (2)
100. deps/v8/src/messages.js (2)

.gitignore (1)

@@ -5,6 +5,7 @@ tags
 *.pyc
 doc/api.xml
 doc/api.html
+doc/changelog.html
 doc/node.1
 node
 node_g

AUTHORS (54)

@@ -0,0 +1,54 @@
+# Authors ordered by first contribution.
+Ryan Dahl <ry@tinyclouds.org>
+Urban Hafner <urban@bettong.net>
+Joshaven Potter <yourtech@gmail.com>
+Abe Fettig <abefettig@gmail.com>
+Kevin van Zonneveld <kevin@vanzonneveld.net>
+Michael Carter <cartermichael@gmail.com>
+Jeff Smick <sprsquish@gmail.com>
+Jon Crosby <jon@joncrosby.me>
+Felix Geisendörfer <felix@debuggable.com>
+Ray Morgan <rmorgan@zappos.com>
+Jérémy Lal <holisme@gmail.com>
+Isaac Z. Schlueter <i@izs.me>
+Brandon Beacher <brandon.beacher@gmail.com>
+Tim Caswell <tim@creationix.com>
+Connor Dunn <connorhd@gmail.com>
+Johan Sørensen <johan@johansorensen.com>
+Friedemann Altrock <frodenius@gmail.com>
+Onne Gorter <onne@onnlucky.com>
+Rhys Jones <rhys@wave.to>
+Jan Lehnardt <jan@apache.org>
+Simon Willison <simon@simonwillison.net>
+Chew Choon Keat <choonkeat@gmail.com>
+Jed Schmidt <tr@nslator.jp>
+Michaeljohn Clement <inimino@inimino.org>
+Karl Guertin <grayrest@gr.ayre.st>
+Xavier Shay <xavier@rhnh.net>
+Christopher Lenz <chris@lamech.local>
+TJ Holowaychuk <tj@vision-media.ca>
+Johan Dahlberg <jfd@distrop.com>
+Simon Cornelius P. Umacob <simoncpu@gmail.com>
+Ryan McGrath <ryan@venodesigns.net>
+Rasmus Andersson <rasmus@notion.se>
+Micheil Smith <micheil@brandedcode.com>
+Jonas Pfenniger <jonas@pfenniger.name>
+Charles Lehner <celehner1@gmail.com>
+Elliott Cable <me@ell.io>
+Benjamin Thomas <benjamin@benjaminthomas.org>
+Vanilla Hsu <v@fatpipi.com>
+Ben Williamson <benw@pobox.com>
+Joseph Pecoraro <joepeck02@gmail.com>
+Erich Ocean <erich.ocean@me.com>
+Alexis Sellier <self@cloudhead.net>
+Blaine Cook <romeda@gmail.com>
+Standa Opichal <opichals@gmail.com>
+Aaron Heckmann <aaron.heckmann@gmail.com>
+Mikeal Rogers <mikeal.rogers@gmail.com>
+Matt Brubeck <mbrubeck@limpet.net>
+Michael Stillwell <mjs@beebo.org>
+Yuichiro MASUI <masui@masuidrive.jp>
+Mark Hansen <mark@markhansen.co.nz>
+Zoran Tomicic <ztomicic@gmail.com>
+Jeremy Ashkenas <jashkenas@gmail.com>

ChangeLog (43)

@@ -1,4 +1,45 @@
-2010.02.17, Version 0.1.29
+2010.02.22, Version 0.1.30
+
+ * Major API Changes
+   - Promises removed. See
+     http://groups.google.com/group/nodejs/msg/426f3071f3eec16b
+     http://groups.google.com/group/nodejs/msg/df199d233ff17efa
+     The API for fs was
+       fs.readdir("/usr").addCallback(function (files) {
+         puts("/usr files: " + files);
+       });
+     It is now
+       fs.readdir("/usr", function (err, files) {
+         if (err) throw err;
+         puts("/usr files: " + files);
+       });
+   - Synchronous fs operations exposed, use with care.
+   - tcp.Connection.prototype.readPause() and readResume()
+     renamed to pause() and resume()
+   - http.ServerResponse.prototype.sendHeader() renamed to
+     writeHeader(). Now accepts reasonPhrase.
+
+ * Compact garbage on idle.
+
+ * Configurable debug ports, and --debug-brk (Zoran Tomicic)
+
+ * Better command line option parsing (Jeremy Ashkenas)
+
+ * Add fs.chmod (Micheil Smith), fs.lstat (Isaac Z. Schlueter)
+
+ * Fixes to process.mixin (Rasmus Andersson, Benjamin Thomas)
+
+ * Upgrade V8 to 2.1.1
+
+2010.02.17, Version 0.1.29, 87d5e5b316a4276bcf881f176971c1a237dcdc7a
 
 * Major API Changes
   - Remove 'file' module

Makefile (9)

@@ -27,7 +27,7 @@ test-debug: all
 benchmark: all
 	build/default/node benchmark/run.js

-doc: doc/node.1 doc/api.html doc/index.html
+doc: doc/node.1 doc/api.html doc/index.html doc/changelog.html

 doc/api.html: doc/api.txt
 	asciidoc --unsafe \
@@ -37,6 +37,11 @@ doc/api.html: doc/api.txt
 		-a linkcss \
 		-o doc/api.html doc/api.txt

+doc/changelog.html: ChangeLog
+	echo '<html><head><title>Node.js ChangeLog</title> <link rel="stylesheet" href="./pipe.css" type="text/css" /> <link rel="stylesheet" href="./pipe-quirks.css" type="text/css" /> <body><h1>Node.js ChangeLog</h1> <pre>' > doc/changelog.html
+	cat ChangeLog >> doc/changelog.html
+	echo '</pre></body></html>' >> doc/changelog.html
+
 doc/api.xml: doc/api.txt
 	asciidoc -b docbook -d manpage -o doc/api.xml doc/api.txt
@@ -47,7 +52,7 @@ website-upload: doc
 	scp doc/* ryan@nodejs.org:~/tinyclouds/node/

 docclean:
-	@-rm -f doc/node.1 doc/api.xml doc/api.html
+	@-rm -f doc/node.1 doc/api.xml doc/api.html doc/changelog.html

 clean: docclean
 	@$(WAF) clean

benchmark/http_simple.js (2)

@@ -47,7 +47,7 @@ http.createServer(function (req, res) {
   var content_length = body.length.toString();

-  res.sendHeader( status
+  res.writeHeader( status
                 , { "Content-Type": "text/plain"
                 , "Content-Length": content_length
                 }

benchmark/static_http_server.js (2)

@@ -16,7 +16,7 @@ for (var i = 0; i < bytes; i++) {
 }

 var server = http.createServer(function (req, res) {
-  res.sendHeader(200, {
+  res.writeHeader(200, {
     "Content-Type": "text/plain",
     "Content-Length": body.length
   });

deps/v8/AUTHORS (2)

@@ -4,6 +4,7 @@
 # Name/Organization <email address>

 Google Inc.
+Sigma Designs Inc.

 Alexander Botero-Lowry <alexbl@FreeBSD.org>
 Alexandre Vassalotti <avassalotti@gmail.com>
@@ -22,3 +23,4 @@ Rene Rebe <rene@exactcode.de>
 Ryan Dahl <coldredlemur@gmail.com>
 Patrick Gansterer <paroga@paroga.com>
 Subrato K De <subratokde@codeaurora.org>
+Dineel D Sule <dsule@codeaurora.org>

deps/v8/ChangeLog (14)

@@ -1,3 +1,17 @@
+2010-02-19: Version 2.1.1
+
+        [ES5] Implemented Object.defineProperty.
+
+        Improved profiler support.
+
+        Added SetPrototype method in the public V8 API.
+
+        Added GetScriptOrigin and GetScriptLineNumber methods to Function
+        objects in the API.
+
+        Performance improvements on all platforms.
+
+
 2010-02-03: Version 2.1.0

         Values are now always wrapped in objects when used as a receiver.

deps/v8/SConstruct (35)

@@ -191,6 +191,17 @@ LIBRARY_FLAGS = {
     'armvariant:arm': {
       'CPPDEFINES': ['V8_ARM_VARIANT_ARM']
     },
+    'arch:mips': {
+      'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
+      'simulator:none': {
+        'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
+        'LDFLAGS': ['-EL']
+      }
+    },
+    'simulator:mips': {
+      'CCFLAGS': ['-m32'],
+      'LINKFLAGS': ['-m32']
+    },
     'arch:x64': {
       'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
       'CCFLAGS': ['-m64'],
@@ -292,6 +303,9 @@ V8_EXTRA_FLAGS = {
       # used by the arm simulator.
       'WARNINGFLAGS': ['/wd4996']
     },
+    'arch:mips': {
+      'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
+    },
     'disassembler:on': {
       'CPPDEFINES': ['ENABLE_DISASSEMBLER']
     }
@@ -457,10 +471,22 @@ SAMPLE_FLAGS = {
       'CCFLAGS': ['-m64'],
       'LINKFLAGS': ['-m64']
     },
+    'arch:mips': {
+      'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
+      'simulator:none': {
+        'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
+        'LINKFLAGS': ['-EL'],
+        'LDFLAGS': ['-EL']
+      }
+    },
     'simulator:arm': {
       'CCFLAGS': ['-m32'],
       'LINKFLAGS': ['-m32']
     },
+    'simulator:mips': {
+      'CCFLAGS': ['-m32'],
+      'LINKFLAGS': ['-m32']
+    },
     'mode:release': {
       'CCFLAGS': ['-O2']
     },
@@ -601,7 +627,7 @@ SIMPLE_OPTIONS = {
     'help': 'the os to build for (' + OS_GUESS + ')'
   },
   'arch': {
-    'values':['arm', 'ia32', 'x64'],
+    'values':['arm', 'ia32', 'x64', 'mips'],
     'default': ARCH_GUESS,
     'help': 'the architecture to build for (' + ARCH_GUESS + ')'
   },
@@ -651,7 +677,7 @@ SIMPLE_OPTIONS = {
     'help': 'use Microsoft Visual C++ link-time code generation'
   },
   'simulator': {
-    'values': ['arm', 'none'],
+    'values': ['arm', 'mips', 'none'],
     'default': 'none',
     'help': 'build with simulator'
   },
@@ -871,6 +897,11 @@ def PostprocessOptions(options):
       options['armvariant'] = 'arm'
   if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
     options['armvariant'] = 'none'
+  if options['arch'] == 'mips':
+    if ('regexp' in ARGUMENTS) and options['regexp'] == 'native':
+      # Print a warning if native regexp is specified for mips
+      print "Warning: forcing regexp to interpreted for mips"
+    options['regexp'] = 'interpreted'


 def ParseEnvOverrides(arg, imports):

deps/v8/include/v8.h (122)

@@ -534,51 +534,76 @@ class V8EXPORT ScriptOrigin {
 class V8EXPORT Script {
  public:

-  /**
-   * Compiles the specified script. The ScriptOrigin* and ScriptData*
-   * parameters are owned by the caller of Script::Compile. No
-   * references to these objects are kept after compilation finishes.
-   *
-   * The script object returned is context independent; when run it
-   * will use the currently entered context.
-   */
-  static Local<Script> New(Handle<String> source,
-                           ScriptOrigin* origin = NULL,
-                           ScriptData* pre_data = NULL);
-
-  /**
-   * Compiles the specified script using the specified file name
-   * object (typically a string) as the script's origin.
-   *
-   * The script object returned is context independent; when run it
-   * will use the currently entered context.
-   */
-  static Local<Script> New(Handle<String> source,
-                           Handle<Value> file_name);
-
-  /**
-   * Compiles the specified script. The ScriptOrigin* and ScriptData*
-   * parameters are owned by the caller of Script::Compile. No
-   * references to these objects are kept after compilation finishes.
-   *
-   * The script object returned is bound to the context that was active
-   * when this function was called. When run it will always use this
-   * context.
-   */
-  static Local<Script> Compile(Handle<String> source,
-                               ScriptOrigin* origin = NULL,
-                               ScriptData* pre_data = NULL);
-
-  /**
-   * Compiles the specified script using the specified file name
-   * object (typically a string) as the script's origin.
-   *
-   * The script object returned is bound to the context that was active
-   * when this function was called. When run it will always use this
-   * context.
-   */
-  static Local<Script> Compile(Handle<String> source,
-                               Handle<Value> file_name);
+  /**
+   * Compiles the specified script (context-independent).
+   *
+   * \param source Script source code.
+   * \param origin Script origin, owned by caller, no references are kept
+   *   when New() returns
+   * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
+   *   using pre_data speeds compilation if it's done multiple times.
+   *   Owned by caller, no references are kept when New() returns.
+   * \param script_data Arbitrary data associated with script. Using
+   *   this has same effect as calling SetData(), but allows data to be
+   *   available to compile event handlers.
+   * \return Compiled script object (context independent; when run it
+   *   will use the currently entered context).
+   */
+  static Local<Script> New(Handle<String> source,
+                           ScriptOrigin* origin = NULL,
+                           ScriptData* pre_data = NULL,
+                           Handle<String> script_data = Handle<String>());
+
+  /**
+   * Compiles the specified script using the specified file name
+   * object (typically a string) as the script's origin.
+   *
+   * \param source Script source code.
+   * \patam file_name file name object (typically a string) to be used
+   *   as the script's origin.
+   * \return Compiled script object (context independent; when run it
+   *   will use the currently entered context).
+   */
+  static Local<Script> New(Handle<String> source,
+                           Handle<Value> file_name);
+
+  /**
+   * Compiles the specified script (bound to current context).
+   *
+   * \param source Script source code.
+   * \param origin Script origin, owned by caller, no references are kept
+   *   when Compile() returns
+   * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
+   *   using pre_data speeds compilation if it's done multiple times.
+   *   Owned by caller, no references are kept when Compile() returns.
+   * \param script_data Arbitrary data associated with script. Using
+   *   this has same effect as calling SetData(), but makes data available
+   *   earlier (i.e. to compile event handlers).
+   * \return Compiled script object, bound to the context that was active
+   *   when this function was called. When run it will always use this
+   *   context.
+   */
+  static Local<Script> Compile(Handle<String> source,
+                               ScriptOrigin* origin = NULL,
+                               ScriptData* pre_data = NULL,
+                               Handle<String> script_data = Handle<String>());
+
+  /**
+   * Compiles the specified script using the specified file name
+   * object (typically a string) as the script's origin.
+   *
+   * \param source Script source code.
+   * \param file_name File name to use as script's origin
+   * \param script_data Arbitrary data associated with script. Using
+   *   this has same effect as calling SetData(), but makes data available
+   *   earlier (i.e. to compile event handlers).
+   * \return Compiled script object, bound to the context that was active
+   *   when this function was called. When run it will always use this
+   *   context.
+   */
+  static Local<Script> Compile(Handle<String> source,
+                               Handle<Value> file_name,
+                               Handle<String> script_data = Handle<String>());

   /**
    * Runs the script returning the resulting value. If the script is
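
(A minimal usage sketch of the new Compile() signature, not part of the commit: pre_data comes from ScriptData::PreCompile() and script_data rides along with the script as if SetData() had been called. It assumes a v8 2.1-era embedder that has already entered a Context; kSource and the "example.js" name are illustrative.)

    static const char kSource[] = "function f(x) { return x * 2; } f(21)";

    v8::HandleScope scope;
    // Pre-parse once; reusing this speeds up repeated compiles of the source.
    v8::ScriptData* pre_data =
        v8::ScriptData::PreCompile(kSource, sizeof(kSource) - 1);
    v8::ScriptOrigin origin(v8::String::New("example.js"));
    v8::Local<v8::Script> script =
        v8::Script::Compile(v8::String::New(kSource),
                            &origin,
                            pre_data,                          // owned by caller
                            v8::String::New("my-script-id"));  // like SetData()
    v8::Local<v8::Value> result = script->Run();
    delete pre_data;  // no references are kept once Compile() returns
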
@@ -1196,6 +1221,13 @@ class V8EXPORT Object : public Value {
    */
   Local<Value> GetPrototype();

+  /**
+   * Set the prototype object. This does not skip objects marked to
+   * be skipped by __proto__ and it does not consult the security
+   * handler.
+   */
+  bool SetPrototype(Handle<Value> prototype);
+
   /**
    * Finds an instance of the given function template in the prototype
    * chain.
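
(A short sketch, mine rather than the commit's, of the new Object::SetPrototype(): unlike assigning __proto__ from script it does not skip hidden prototypes and does not consult the security handler. Assumes an entered Context; the property names are illustrative.)

    v8::HandleScope scope;
    v8::Local<v8::Object> proto = v8::Object::New();
    proto->Set(v8::String::New("shared"), v8::Integer::New(42));
    v8::Local<v8::Object> obj = v8::Object::New();
    if (obj->SetPrototype(proto)) {
      // obj now inherits "shared" through its prototype chain.
      v8::Local<v8::Value> v = obj->Get(v8::String::New("shared"));
    }
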
@@ -1354,7 +1386,15 @@ class V8EXPORT Function : public Object {
   Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
   void SetName(Handle<String> name);
   Handle<Value> GetName() const;
+
+  /**
+   * Returns zero based line number of function body and
+   * kLineOffsetNotFound if no information available.
+   */
+  int GetScriptLineNumber() const;
+  ScriptOrigin GetScriptOrigin() const;
   static inline Function* Cast(Value* obj);
+  static const int kLineOffsetNotFound;

  private:
   Function();
   static void CheckCast(Value* obj);
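
(A hedged sketch of the new Function introspection, assuming fn is a v8::Handle<v8::Function> obtained from previously run script; the printf formatting is illustrative only.)

    int line = fn->GetScriptLineNumber();  // zero-based
    if (line != v8::Function::kLineOffsetNotFound) {
      v8::ScriptOrigin origin = fn->GetScriptOrigin();
      v8::String::Utf8Value name(origin.ResourceName());
      printf("fn was defined in %s at line %d\n", *name, line);
    }
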
@@ -2309,22 +2349,30 @@ class V8EXPORT V8 {
   static bool IsProfilerPaused();

   /**
-   * Resumes specified profiler modules.
+   * Resumes specified profiler modules. Can be called several times to
+   * mark the opening of a profiler events block with the given tag.
+   *
    * "ResumeProfiler" is equivalent to "ResumeProfilerEx(PROFILER_MODULE_CPU)".
    * See ProfilerModules enum.
    *
    * \param flags Flags specifying profiler modules.
+   * \param tag Profile tag.
    */
-  static void ResumeProfilerEx(int flags);
+  static void ResumeProfilerEx(int flags, int tag = 0);

   /**
-   * Pauses specified profiler modules.
+   * Pauses specified profiler modules. Each call to "PauseProfilerEx" closes
+   * a block of profiler events opened by a call to "ResumeProfilerEx" with the
+   * same tag value. There is no need for blocks to be properly nested.
+   * The profiler is paused when the last opened block is closed.
+   *
    * "PauseProfiler" is equivalent to "PauseProfilerEx(PROFILER_MODULE_CPU)".
    * See ProfilerModules enum.
    *
    * \param flags Flags specifying profiler modules.
+   * \param tag Profile tag.
    */
-  static void PauseProfilerEx(int flags);
+  static void PauseProfilerEx(int flags, int tag = 0);

   /**
    * Returns active (resumed) profiler modules.
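
(A sketch, not from the commit, of the tagged block semantics described above: each ResumeProfilerEx() call opens a block for its tag, the matching PauseProfilerEx() closes it, and the profiler pauses only when the last open block closes. The tag constants are hypothetical embedder-chosen values.)

    const int kParseTag = 1;
    const int kExecTag = 2;
    v8::V8::ResumeProfilerEx(v8::PROFILER_MODULE_CPU, kParseTag);
    v8::V8::ResumeProfilerEx(v8::PROFILER_MODULE_CPU, kExecTag);
    v8::V8::PauseProfilerEx(v8::PROFILER_MODULE_CPU, kParseTag);  // one block open
    v8::V8::PauseProfilerEx(v8::PROFILER_MODULE_CPU, kExecTag);   // now paused
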

deps/v8/samples/lineprocessor.cc (2)

@@ -152,7 +152,7 @@ int RunMain(int argc, char* argv[]) {
   } else if (strcmp(str, "--main-cycle-in-js") == 0) {
     cycle_type = CycleInJs;
   } else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
-    port_number = atoi(argv[i + 1]);
+    port_number = atoi(argv[i + 1]);  // NOLINT
     i++;
   } else if (strncmp(str, "--", 2) == 0) {
     printf("Warning: unknown flag %s.\nTry --help for options\n", str);

deps/v8/src/SConscript (20)

@@ -72,6 +72,7 @@ SOURCES = {
     interpreter-irregexp.cc
     jsregexp.cc
     jump-target.cc
+    liveedit.cc
     log-utils.cc
     log.cc
     mark-compact.cc
@@ -131,6 +132,24 @@ SOURCES = {
   'armvariant:thumb2': Split("""
     arm/assembler-thumb2.cc
     """),
+  'arch:mips': Split("""
+    mips/assembler-mips.cc
+    mips/builtins-mips.cc
+    mips/codegen-mips.cc
+    mips/constants-mips.cc
+    mips/cpu-mips.cc
+    mips/debug-mips.cc
+    mips/disasm-mips.cc
+    mips/fast-codegen-mips.cc
+    mips/full-codegen-mips.cc
+    mips/frames-mips.cc
+    mips/ic-mips.cc
+    mips/jump-target-mips.cc
+    mips/macro-assembler-mips.cc
+    mips/register-allocator-mips.cc
+    mips/stub-cache-mips.cc
+    mips/virtual-frame-mips.cc
+    """),
   'arch:ia32': Split("""
     ia32/assembler-ia32.cc
     ia32/builtins-ia32.cc
@@ -168,6 +187,7 @@ SOURCES = {
     x64/virtual-frame-x64.cc
     """),
   'simulator:arm': ['arm/simulator-arm.cc'],
+  'simulator:mips': ['mips/simulator-mips.cc'],
   'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
   'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
   'os:linux': ['platform-linux.cc', 'platform-posix.cc'],

deps/v8/src/accessors.cc (37)

@@ -647,42 +647,9 @@ Object* Accessors::ObjectGetPrototype(Object* receiver, void*) {
 Object* Accessors::ObjectSetPrototype(JSObject* receiver,
                                       Object* value,
                                       void*) {
-  // Before we can set the prototype we need to be sure
-  // prototype cycles are prevented.
-  // It is sufficient to validate that the receiver is not in the new prototype
-  // chain.
-
-  // Silently ignore the change if value is not a JSObject or null.
-  // SpiderMonkey behaves this way.
-  if (!value->IsJSObject() && !value->IsNull()) return value;
-
-  for (Object* pt = value; pt != Heap::null_value(); pt = pt->GetPrototype()) {
-    if (JSObject::cast(pt) == receiver) {
-      // Cycle detected.
-      HandleScope scope;
-      return Top::Throw(*Factory::NewError("cyclic_proto",
-                                           HandleVector<Object>(NULL, 0)));
-    }
-  }
-
-  // Find the first object in the chain whose prototype object is not
-  // hidden and set the new prototype on that object.
-  JSObject* current = receiver;
-  Object* current_proto = receiver->GetPrototype();
-  while (current_proto->IsJSObject() &&
-         JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
-    current = JSObject::cast(current_proto);
-    current_proto = current_proto->GetPrototype();
-  }
-
-  // Set the new prototype of the object.
-  Object* new_map = current->map()->CopyDropTransitions();
-  if (new_map->IsFailure()) return new_map;
-  Map::cast(new_map)->set_prototype(value);
-  current->set_map(Map::cast(new_map));
-
+  const bool skip_hidden_prototypes = true;
   // To be consistent with other Set functions, return the value.
-  return value;
+  return receiver->SetPrototype(value, skip_hidden_prototypes);
 }

deps/v8/src/api.cc (164)

@@ -1106,7 +1106,8 @@ ScriptData* ScriptData::New(unsigned* data, int length) {
 Local<Script> Script::New(v8::Handle<String> source,
                           v8::ScriptOrigin* origin,
-                          v8::ScriptData* script_data) {
+                          v8::ScriptData* pre_data,
+                          v8::Handle<String> script_data) {
   ON_BAILOUT("v8::Script::New()", return Local<Script>());
   LOG_API("Script::New");
   ENTER_V8;
@@ -1126,20 +1127,17 @@ Local<Script> Script::New(v8::Handle<String> source,
     }
   }
   EXCEPTION_PREAMBLE();
-  i::ScriptDataImpl* pre_data = static_cast<i::ScriptDataImpl*>(script_data);
+  i::ScriptDataImpl* pre_data_impl = static_cast<i::ScriptDataImpl*>(pre_data);
   // We assert that the pre-data is sane, even though we can actually
   // handle it if it turns out not to be in release mode.
-  ASSERT(pre_data == NULL || pre_data->SanityCheck());
+  ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
   // If the pre-data isn't sane we simply ignore it
-  if (pre_data != NULL && !pre_data->SanityCheck()) {
-    pre_data = NULL;
+  if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
+    pre_data_impl = NULL;
   }
-  i::Handle<i::JSFunction> boilerplate = i::Compiler::Compile(str,
-                                                              name_obj,
-                                                              line_offset,
-                                                              column_offset,
-                                                              NULL,
-                                                              pre_data);
+  i::Handle<i::JSFunction> boilerplate =
+      i::Compiler::Compile(str, name_obj, line_offset, column_offset, NULL,
+                           pre_data_impl, Utils::OpenHandle(*script_data));
   has_pending_exception = boilerplate.is_null();
   EXCEPTION_BAILOUT_CHECK(Local<Script>());
   return Local<Script>(ToApi<Script>(boilerplate));
@@ -1155,11 +1153,12 @@ Local<Script> Script::New(v8::Handle<String> source,
 Local<Script> Script::Compile(v8::Handle<String> source,
                               v8::ScriptOrigin* origin,
-                              v8::ScriptData* script_data) {
+                              v8::ScriptData* pre_data,
+                              v8::Handle<String> script_data) {
   ON_BAILOUT("v8::Script::Compile()", return Local<Script>());
   LOG_API("Script::Compile");
   ENTER_V8;
-  Local<Script> generic = New(source, origin, script_data);
+  Local<Script> generic = New(source, origin, pre_data, script_data);
   if (generic.IsEmpty())
     return generic;
   i::Handle<i::JSFunction> boilerplate = Utils::OpenHandle(*generic);
@@ -1171,9 +1170,10 @@ Local<Script> Script::Compile(v8::Handle<String> source,
 Local<Script> Script::Compile(v8::Handle<String> source,
-                              v8::Handle<Value> file_name) {
+                              v8::Handle<Value> file_name,
+                              v8::Handle<String> script_data) {
   ScriptOrigin origin(file_name);
-  return Compile(source, &origin);
+  return Compile(source, &origin, 0, script_data);
 }
@@ -2032,6 +2032,19 @@ Local<Value> v8::Object::GetPrototype() {
 }

+bool v8::Object::SetPrototype(Handle<Value> value) {
+  ON_BAILOUT("v8::Object::SetPrototype()", return false);
+  ENTER_V8;
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+  EXCEPTION_PREAMBLE();
+  i::Handle<i::Object> result = i::SetPrototype(self, value_obj);
+  has_pending_exception = result.is_null();
+  EXCEPTION_BAILOUT_CHECK(false);
+  return true;
+}
+
 Local<Object> v8::Object::FindInstanceInPrototypeChain(
     v8::Handle<FunctionTemplate> tmpl) {
   ON_BAILOUT("v8::Object::FindInstanceInPrototypeChain()",
@@ -2194,7 +2207,7 @@ Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
   i::LookupResult lookup;
   self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
-  if (lookup.IsValid()) {
+  if (lookup.IsProperty()) {
     PropertyAttributes attributes;
     i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
                                                       &lookup,
@@ -2213,7 +2226,7 @@ Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
   i::LookupResult lookup;
   self_obj->LookupRealNamedProperty(*key_obj, &lookup);
-  if (lookup.IsValid()) {
+  if (lookup.IsProperty()) {
     PropertyAttributes attributes;
     i::Handle<i::Object> result(self_obj->GetProperty(*self_obj,
                                                       &lookup,
@@ -2445,6 +2458,99 @@ Handle<Value> Function::GetName() const {
 }

+ScriptOrigin Function::GetScriptOrigin() const {
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  if (func->shared()->script()->IsScript()) {
+    i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+    v8::ScriptOrigin origin(
+        Utils::ToLocal(i::Handle<i::Object>(script->name())),
+        v8::Integer::New(script->line_offset()->value()),
+        v8::Integer::New(script->column_offset()->value()));
+    return origin;
+  }
+  return v8::ScriptOrigin(Handle<Value>());
+}
+
+const int Function::kLineOffsetNotFound = -1;
+
+int Function::GetScriptLineNumber() const {
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  if (func->shared()->script()->IsScript()) {
+    i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
+    return i::GetScriptLineNumber(script, func->shared()->start_position());
+  }
+  return kLineOffsetNotFound;
+}
+
+namespace {
+
+// Tracks string usage to help make better decisions when
+// externalizing strings.
+//
+// Implementation note: internally this class only tracks fresh
+// strings and keeps a single use counter for them.
+class StringTracker {
+ public:
+  // Records that the given string's characters were copied to some
+  // external buffer. If this happens often we should honor
+  // externalization requests for the string.
+  static void RecordWrite(i::Handle<i::String> string) {
+    i::Address address = reinterpret_cast<i::Address>(*string);
+    i::Address top = i::Heap::NewSpaceTop();
+    if (IsFreshString(address, top)) {
+      IncrementUseCount(top);
+    }
+  }
+
+  // Estimates freshness and use frequency of the given string based
+  // on how close it is to the new space top and the recorded usage
+  // history.
+  static inline bool IsFreshUnusedString(i::Handle<i::String> string) {
+    i::Address address = reinterpret_cast<i::Address>(*string);
+    i::Address top = i::Heap::NewSpaceTop();
+    return IsFreshString(address, top) && IsUseCountLow(top);
+  }
+
+ private:
+  static inline bool IsFreshString(i::Address string, i::Address top) {
+    return top - kFreshnessLimit <= string && string <= top;
+  }
+
+  static inline bool IsUseCountLow(i::Address top) {
+    if (last_top_ != top) return true;
+    return use_count_ < kUseLimit;
+  }
+
+  static inline void IncrementUseCount(i::Address top) {
+    if (last_top_ != top) {
+      use_count_ = 0;
+      last_top_ = top;
+    }
+    ++use_count_;
+  }
+
+  // How close to the new space top a fresh string has to be.
+  static const int kFreshnessLimit = 1024;
+
+  // The number of uses required to consider a string useful.
+  static const int kUseLimit = 32;
+
+  // Single use counter shared by all fresh strings.
+  static int use_count_;
+
+  // Last new space top when the use count above was valid.
+  static i::Address last_top_;
+};
+
+int StringTracker::use_count_ = 0;
+i::Address StringTracker::last_top_ = NULL;
+
+}  // namespace
+
 int String::Length() const {
   if (IsDeadCheck("v8::String::Length()")) return 0;
   return Utils::OpenHandle(this)->length();
@@ -2462,6 +2568,7 @@ int String::WriteUtf8(char* buffer, int capacity) const {
   LOG_API("String::WriteUtf8");
   ENTER_V8;
   i::Handle<i::String> str = Utils::OpenHandle(this);
+  StringTracker::RecordWrite(str);
   write_input_buffer.Reset(0, *str);
   int len = str->length();
   // Encode the first K - 3 bytes directly into the buffer since we
@@ -2505,6 +2612,7 @@ int String::WriteAscii(char* buffer, int start, int length) const {
   ENTER_V8;
   ASSERT(start >= 0 && length >= -1);
   i::Handle<i::String> str = Utils::OpenHandle(this);
+  StringTracker::RecordWrite(str);
   // Flatten the string for efficiency. This applies whether we are
   // using StringInputBuffer or Get(i) to access the characters.
   str->TryFlattenIfNotFlat();
@@ -2531,6 +2639,7 @@ int String::Write(uint16_t* buffer, int start, int length) const {
   ENTER_V8;
   ASSERT(start >= 0 && length >= -1);
   i::Handle<i::String> str = Utils::OpenHandle(this);
+  StringTracker::RecordWrite(str);
   int end = length;
   if ( (length == -1) || (length > str->length() - start) )
     end = str->length() - start;
@@ -3098,6 +3207,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
   if (this->IsExternal()) return false;  // Already an external string.
   ENTER_V8;
   i::Handle<i::String> obj = Utils::OpenHandle(this);
+  if (StringTracker::IsFreshUnusedString(obj)) return false;
   bool result = obj->MakeExternal(resource);
   if (result && !obj->IsSymbol()) {
     i::ExternalStringTable::AddString(*obj);
@@ -3123,6 +3233,7 @@ bool v8::String::MakeExternal(
   if (this->IsExternal()) return false;  // Already an external string.
   ENTER_V8;
   i::Handle<i::String> obj = Utils::OpenHandle(this);
+  if (StringTracker::IsFreshUnusedString(obj)) return false;
   bool result = obj->MakeExternal(resource);
   if (result && !obj->IsSymbol()) {
     i::ExternalStringTable::AddString(*obj);
@@ -3134,6 +3245,7 @@ bool v8::String::MakeExternal(
 bool v8::String::CanMakeExternal() {
   if (IsDeadCheck("v8::String::CanMakeExternal()")) return false;
   i::Handle<i::String> obj = Utils::OpenHandle(this);
+  if (StringTracker::IsFreshUnusedString(obj)) return false;
   int size = obj->Size();  // Byte size of the original string.
   if (size < i::ExternalString::kSize)
     return false;
@@ -3357,14 +3469,14 @@ void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
 void V8::PauseProfiler() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  i::Logger::PauseProfiler(PROFILER_MODULE_CPU);
+  PauseProfilerEx(PROFILER_MODULE_CPU);
 #endif
 }

 void V8::ResumeProfiler() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  i::Logger::ResumeProfiler(PROFILER_MODULE_CPU);
+  ResumeProfilerEx(PROFILER_MODULE_CPU);
 #endif
 }
@@ -3378,7 +3490,7 @@ bool V8::IsProfilerPaused() {
 }

-void V8::ResumeProfilerEx(int flags) {
+void V8::ResumeProfilerEx(int flags, int tag) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (flags & PROFILER_MODULE_HEAP_SNAPSHOT) {
     // Snapshot mode: resume modules, perform GC, then pause only
@@ -3388,19 +3500,19 @@ void V8::ResumeProfilerEx(int flags) {
     // Reset snapshot flag and CPU module flags.
     flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
     const int current_flags = i::Logger::GetActiveProfilerModules();
-    i::Logger::ResumeProfiler(flags);
+    i::Logger::ResumeProfiler(flags, tag);
     i::Heap::CollectAllGarbage(false);
-    i::Logger::PauseProfiler(~current_flags & flags);
+    i::Logger::PauseProfiler(~current_flags & flags, tag);
   } else {
-    i::Logger::ResumeProfiler(flags);
+    i::Logger::ResumeProfiler(flags, tag);
   }
 #endif
 }

-void V8::PauseProfilerEx(int flags) {
+void V8::PauseProfilerEx(int flags, int tag) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  i::Logger::PauseProfiler(flags);
+  i::Logger::PauseProfiler(flags, tag);
 #endif
 }

191
deps/v8/src/arm/assembler-arm.cc

@ -51,9 +51,14 @@ void CpuFeatures::Probe() {
// If the compiler is allowed to use vfp then we can use vfp too in our // If the compiler is allowed to use vfp then we can use vfp too in our
// code generation. // code generation.
#if !defined(__arm__) #if !defined(__arm__)
// For the simulator=arm build, always use VFP since the arm simulator has // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
// VFP support. if (FLAG_enable_vfp3) {
supported_ |= 1u << VFP3; supported_ |= 1u << VFP3;
}
// For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
if (FLAG_enable_armv7) {
supported_ |= 1u << ARMv7;
}
#else #else
if (Serializer::enabled()) { if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform(); supported_ |= OS::CpuFeaturesImpliedByPlatform();
@ -66,6 +71,11 @@ void CpuFeatures::Probe() {
supported_ |= 1u << VFP3; supported_ |= 1u << VFP3;
found_by_runtime_probing_ |= 1u << VFP3; found_by_runtime_probing_ |= 1u << VFP3;
} }
if (OS::ArmCpuHasFeature(ARMv7)) {
supported_ |= 1u << ARMv7;
found_by_runtime_probing_ |= 1u << ARMv7;
}
#endif #endif
} }
@ -83,9 +93,9 @@ Register r4 = { 4 };
Register r5 = { 5 }; Register r5 = { 5 };
Register r6 = { 6 }; Register r6 = { 6 };
Register r7 = { 7 }; Register r7 = { 7 };
Register r8 = { 8 }; Register r8 = { 8 }; // Used as context register.
Register r9 = { 9 }; Register r9 = { 9 };
Register r10 = { 10 }; Register r10 = { 10 }; // Used as roots register.
Register fp = { 11 }; Register fp = { 11 };
Register ip = { 12 }; Register ip = { 12 };
Register sp = { 13 }; Register sp = { 13 };
@ -264,9 +274,9 @@ MemOperand::MemOperand(Register rn, Register rm,
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// Implementation of Assembler // Implementation of Assembler.
// Instruction encoding bits // Instruction encoding bits.
enum { enum {
H = 1 << 5, // halfword (or byte) H = 1 << 5, // halfword (or byte)
S6 = 1 << 6, // signed (or unsigned) S6 = 1 << 6, // signed (or unsigned)
@ -299,14 +309,14 @@ enum {
B26 = 1 << 26, B26 = 1 << 26,
B27 = 1 << 27, B27 = 1 << 27,
// Instruction bit masks // Instruction bit masks.
RdMask = 15 << 12, // in str instruction RdMask = 15 << 12, // in str instruction
CondMask = 15 << 28, CondMask = 15 << 28,
CoprocessorMask = 15 << 8, CoprocessorMask = 15 << 8,
OpCodeMask = 15 << 21, // in data-processing instructions OpCodeMask = 15 << 21, // in data-processing instructions
Imm24Mask = (1 << 24) - 1, Imm24Mask = (1 << 24) - 1,
Off12Mask = (1 << 12) - 1, Off12Mask = (1 << 12) - 1,
// Reserved condition // Reserved condition.
nv = 15 << 28 nv = 15 << 28
}; };
@ -327,13 +337,13 @@ const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
// ldr pc, [pc, #XXX] // ldr pc, [pc, #XXX]
const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16; const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
// spare_buffer_ // Spare buffer.
static const int kMinimalBufferSize = 4*KB; static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL; static byte* spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size) { Assembler::Assembler(void* buffer, int buffer_size) {
if (buffer == NULL) { if (buffer == NULL) {
// do our own buffer management // Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) { if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize; buffer_size = kMinimalBufferSize;
@ -351,14 +361,14 @@ Assembler::Assembler(void* buffer, int buffer_size) {
own_buffer_ = true; own_buffer_ = true;
} else { } else {
// use externally provided buffer instead // Use externally provided buffer instead.
ASSERT(buffer_size > 0); ASSERT(buffer_size > 0);
buffer_ = static_cast<byte*>(buffer); buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size; buffer_size_ = buffer_size;
own_buffer_ = false; own_buffer_ = false;
} }
// setup buffer pointers // Setup buffer pointers.
ASSERT(buffer_ != NULL); ASSERT(buffer_ != NULL);
pc_ = buffer_; pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@ -386,11 +396,11 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) { void Assembler::GetCode(CodeDesc* desc) {
// emit constant pool if necessary // Emit constant pool if necessary.
CheckConstPool(true, false); CheckConstPool(true, false);
ASSERT(num_prinfo_ == 0); ASSERT(num_prinfo_ == 0);
// setup desc // Setup code descriptor.
desc->buffer = buffer_; desc->buffer = buffer_;
desc->buffer_size = buffer_size_; desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset(); desc->instr_size = pc_offset();
@ -539,7 +549,7 @@ void Assembler::bind_to(Label* L, int pos) {
void Assembler::link_to(Label* L, Label* appendix) { void Assembler::link_to(Label* L, Label* appendix) {
if (appendix->is_linked()) { if (appendix->is_linked()) {
if (L->is_linked()) { if (L->is_linked()) {
// append appendix to L's list // Append appendix to L's list.
int fixup_pos; int fixup_pos;
int link = L->pos(); int link = L->pos();
do { do {
@ -549,7 +559,7 @@ void Assembler::link_to(Label* L, Label* appendix) {
ASSERT(link == kEndOfChain); ASSERT(link == kEndOfChain);
target_at_put(fixup_pos, appendix->pos()); target_at_put(fixup_pos, appendix->pos());
} else { } else {
// L is empty, simply use appendix // L is empty, simply use appendix.
*L = *appendix; *L = *appendix;
} }
} }
@ -575,12 +585,12 @@ void Assembler::next(Label* L) {
} }
// Low-level code emission routines depending on the addressing mode // Low-level code emission routines depending on the addressing mode.
static bool fits_shifter(uint32_t imm32, static bool fits_shifter(uint32_t imm32,
uint32_t* rotate_imm, uint32_t* rotate_imm,
uint32_t* immed_8, uint32_t* immed_8,
Instr* instr) { Instr* instr) {
// imm32 must be unsigned // imm32 must be unsigned.
for (int rot = 0; rot < 16; rot++) { for (int rot = 0; rot < 16; rot++) {
uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot)); uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
if ((imm8 <= 0xff)) { if ((imm8 <= 0xff)) {
@ -589,7 +599,7 @@ static bool fits_shifter(uint32_t imm32,
return true; return true;
} }
} }
// if the opcode is mov or mvn and if ~imm32 fits, change the opcode // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) { if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= 0x2*B21; *instr ^= 0x2*B21;
@ -626,7 +636,7 @@ void Assembler::addrmod1(Instr instr,
CheckBuffer(); CheckBuffer();
ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0); ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
if (!x.rm_.is_valid()) { if (!x.rm_.is_valid()) {
// immediate // Immediate.
uint32_t rotate_imm; uint32_t rotate_imm;
uint32_t immed_8; uint32_t immed_8;
if (MustUseIp(x.rmode_) || if (MustUseIp(x.rmode_) ||
@ -634,7 +644,7 @@ void Assembler::addrmod1(Instr instr,
// The immediate operand cannot be encoded as a shifter operand, so load // The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip. // it first to register ip and change the original instruction to use ip.
// However, if the original instruction is a 'mov rd, x' (not setting the // However, if the original instruction is a 'mov rd, x' (not setting the
// condition code), then replace it with a 'ldr rd, [pc]' // condition code), then replace it with a 'ldr rd, [pc]'.
RecordRelocInfo(x.rmode_, x.imm32_); RecordRelocInfo(x.rmode_, x.imm32_);
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = static_cast<Condition>(instr & CondMask); Condition cond = static_cast<Condition>(instr & CondMask);
@ -648,16 +658,16 @@ void Assembler::addrmod1(Instr instr,
} }
instr |= I | rotate_imm*B8 | immed_8; instr |= I | rotate_imm*B8 | immed_8;
} else if (!x.rs_.is_valid()) { } else if (!x.rs_.is_valid()) {
// immediate shift // Immediate shift.
instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
} else { } else {
// register shift // Register shift.
ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc)); ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code(); instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
} }
emit(instr | rn.code()*B16 | rd.code()*B12); emit(instr | rn.code()*B16 | rd.code()*B12);
if (rn.is(pc) || x.rm_.is(pc)) if (rn.is(pc) || x.rm_.is(pc))
// block constant pool emission for one instruction after reading pc // Block constant pool emission for one instruction after reading pc.
BlockConstPoolBefore(pc_offset() + kInstrSize); BlockConstPoolBefore(pc_offset() + kInstrSize);
} }
@ -666,15 +676,15 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
ASSERT((instr & ~(CondMask | B | L)) == B26); ASSERT((instr & ~(CondMask | B | L)) == B26);
int am = x.am_; int am = x.am_;
if (!x.rm_.is_valid()) { if (!x.rm_.is_valid()) {
// immediate offset // Immediate offset.
int offset_12 = x.offset_; int offset_12 = x.offset_;
if (offset_12 < 0) { if (offset_12 < 0) {
offset_12 = -offset_12; offset_12 = -offset_12;
am ^= U; am ^= U;
} }
if (!is_uint12(offset_12)) { if (!is_uint12(offset_12)) {
// immediate offset cannot be encoded, load it first to register ip // Immediate offset cannot be encoded, load it first to register ip
// rn (and rd in a load) should never be ip, or will be trashed // rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC, mov(ip, Operand(x.offset_), LeaveCC,
static_cast<Condition>(instr & CondMask)); static_cast<Condition>(instr & CondMask));
@ -684,9 +694,9 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
ASSERT(offset_12 >= 0); // no masking needed ASSERT(offset_12 >= 0); // no masking needed
instr |= offset_12; instr |= offset_12;
} else { } else {
// register offset (shift_imm_ and shift_op_ are 0) or scaled // Register offset (shift_imm_ and shift_op_ are 0) or scaled
// register offset the constructors make sure than both shift_imm_ // register offset the constructors make sure than both shift_imm_
// and shift_op_ are initialized // and shift_op_ are initialized.
ASSERT(!x.rm_.is(pc)); ASSERT(!x.rm_.is(pc));
instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
} }
@ -700,15 +710,15 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
ASSERT(x.rn_.is_valid()); ASSERT(x.rn_.is_valid());
int am = x.am_; int am = x.am_;
if (!x.rm_.is_valid()) { if (!x.rm_.is_valid()) {
// immediate offset // Immediate offset.
int offset_8 = x.offset_; int offset_8 = x.offset_;
if (offset_8 < 0) { if (offset_8 < 0) {
offset_8 = -offset_8; offset_8 = -offset_8;
am ^= U; am ^= U;
} }
if (!is_uint8(offset_8)) { if (!is_uint8(offset_8)) {
// immediate offset cannot be encoded, load it first to register ip // Immediate offset cannot be encoded, load it first to register ip
// rn (and rd in a load) should never be ip, or will be trashed // rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC, mov(ip, Operand(x.offset_), LeaveCC,
static_cast<Condition>(instr & CondMask)); static_cast<Condition>(instr & CondMask));
@ -718,15 +728,15 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
ASSERT(offset_8 >= 0); // no masking needed ASSERT(offset_8 >= 0); // no masking needed
instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf); instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
} else if (x.shift_imm_ != 0) { } else if (x.shift_imm_ != 0) {
// scaled register offset not supported, load index first // Scaled register offset not supported, load index first
// rn (and rd in a load) should never be ip, or will be trashed // rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC, mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
static_cast<Condition>(instr & CondMask)); static_cast<Condition>(instr & CondMask));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return; return;
} else { } else {
// register offset // Register offset.
ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
instr |= x.rm_.code(); instr |= x.rm_.code();
} }
@ -744,7 +754,7 @@ void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) { void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
// unindexed addressing is not encoded by this function // Unindexed addressing is not encoded by this function.
ASSERT_EQ((B27 | B26), ASSERT_EQ((B27 | B26),
(instr & ~(CondMask | CoprocessorMask | P | U | N | W | L))); (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
ASSERT(x.rn_.is_valid() && !x.rm_.is_valid()); ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
@ -759,7 +769,7 @@ void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
// post-indexed addressing requires W == 1; different than in addrmod2/3 // Post-indexed addressing requires W == 1; different than in addrmod2/3.
if ((am & P) == 0) if ((am & P) == 0)
am |= W; am |= W;
@ -782,7 +792,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
} }
// Block the emission of the constant pool, since the branch instruction must // Block the emission of the constant pool, since the branch instruction must
// be emitted at the pc offset recorded by the label // be emitted at the pc offset recorded by the label.
BlockConstPoolBefore(pc_offset() + kInstrSize); BlockConstPoolBefore(pc_offset() + kInstrSize);
return target_pos - (pc_offset() + kPcLoadDelta); return target_pos - (pc_offset() + kPcLoadDelta);
} }
@ -804,7 +814,7 @@ void Assembler::label_at_put(Label* L, int at_offset) {
} }
// Branch instructions // Branch instructions.
void Assembler::b(int branch_offset, Condition cond) { void Assembler::b(int branch_offset, Condition cond) {
ASSERT((branch_offset & 3) == 0); ASSERT((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2; int imm24 = branch_offset >> 2;
@ -812,7 +822,7 @@ void Assembler::b(int branch_offset, Condition cond) {
emit(cond | B27 | B25 | (imm24 & Imm24Mask)); emit(cond | B27 | B25 | (imm24 & Imm24Mask));
if (cond == al) if (cond == al)
// dead code is a good location to emit the constant pool // Dead code is a good location to emit the constant pool.
CheckConstPool(false, false); CheckConstPool(false, false);
} }
@ -849,7 +859,22 @@ void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
} }
// Data-processing instructions // Data-processing instructions.
// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
// Instruction details available in ARM DDI 0406A, A8-464.
// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
// Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
const Operand& src3, Condition cond) {
ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
emit(cond | 0x3F*B21 | src3.imm32_*B16 |
dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
}
void Assembler::and_(Register dst, Register src1, const Operand& src2, void Assembler::and_(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) { SBit s, Condition cond) {
addrmod1(cond | 0*B21 | s, src1, dst, src2); addrmod1(cond | 0*B21 | s, src1, dst, src2);
@ -886,7 +911,7 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
if (FLAG_push_pop_elimination && if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) && last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) && reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
// pattern // Pattern.
instr_at(pc_ - 1 * kInstrSize) == kPopInstruction && instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
(instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) { (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
pc_ -= 2 * kInstrSize; pc_ -= 2 * kInstrSize;
@@ -960,7 +985,7 @@ void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
 }
-// Multiply instructions
+// Multiply instructions.
 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                     SBit s, Condition cond) {
   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
@@ -1029,7 +1054,7 @@ void Assembler::umull(Register dstL,
 }
-// Miscellaneous arithmetic instructions
+// Miscellaneous arithmetic instructions.
 void Assembler::clz(Register dst, Register src, Condition cond) {
   // v5 and above.
   ASSERT(!dst.is(pc) && !src.is(pc));
@@ -1038,7 +1063,7 @@ void Assembler::clz(Register dst, Register src, Condition cond) {
 }
-// Status register access instructions
+// Status register access instructions.
 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
   ASSERT(!dst.is(pc));
   emit(cond | B24 | s | 15*B16 | dst.code()*B12);
@@ -1050,12 +1075,12 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
   ASSERT(fields >= B16 && fields < B20);  // at least one field set
   Instr instr;
   if (!src.rm_.is_valid()) {
-    // immediate
+    // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
     if (MustUseIp(src.rmode_) ||
         !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
-      // immediate operand cannot be encoded, load it first to register ip
+      // Immediate operand cannot be encoded, load it first to register ip.
       RecordRelocInfo(src.rmode_, src.imm32_);
       ldr(ip, MemOperand(pc, 0), cond);
       msr(fields, Operand(ip), cond);
@@ -1070,7 +1095,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
 }
-// Load/Store instructions
+// Load/Store instructions.
 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
   if (dst.is(pc)) {
     WriteRecordedPositions();
@@ -1085,7 +1110,7 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
   if (FLAG_push_pop_elimination &&
       last_bound_pos_ <= (pc_offset() - pattern_size) &&
       reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // pattern
+      // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
       instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
     pc_ -= 2 * kInstrSize;
@@ -1106,6 +1131,7 @@ void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
   if (FLAG_push_pop_elimination &&
       last_bound_pos_ <= (pc_offset() - pattern_size) &&
       reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+      // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
       instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
     pc_ -= 2 * kInstrSize;
@@ -1147,17 +1173,17 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
 }
-// Load/Store multiple instructions
+// Load/Store multiple instructions.
 void Assembler::ldm(BlockAddrMode am,
                     Register base,
                     RegList dst,
                     Condition cond) {
-  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable
+  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
   ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
   addrmod4(cond | B27 | am | L, base, dst);
-  // emit the constant pool after a function return implemented by ldm ..{..pc}
+  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
   if (cond == al && (dst & pc.bit()) != 0) {
     // There is a slight chance that the ldm instruction was actually a call,
     // in which case it would be wrong to return into the constant pool; we
@@ -1177,7 +1203,7 @@ void Assembler::stm(BlockAddrMode am,
 }
-// Semaphore instructions
+// Semaphore instructions.
 void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
   ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
   ASSERT(!dst.is(base) && !src.is(base));
@@ -1197,7 +1223,7 @@ void Assembler::swpb(Register dst,
 }
-// Exception-generating instructions and debugging support
+// Exception-generating instructions and debugging support.
 void Assembler::stop(const char* msg) {
 #if !defined(__arm__)
   // The simulator handles these special instructions and stops execution.
@@ -1222,7 +1248,7 @@ void Assembler::swi(uint32_t imm24, Condition cond) {
 }
-// Coprocessor instructions
+// Coprocessor instructions.
 void Assembler::cdp(Coprocessor coproc,
                     int opcode_1,
                     CRegister crd,
@@ -1307,7 +1333,7 @@ void Assembler::ldc(Coprocessor coproc,
                     int option,
                     LFlag l,
                     Condition cond) {
-  // unindexed addressing
+  // Unindexed addressing.
   ASSERT(is_uint8(option));
   emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
        coproc*B8 | (option & 255));
@@ -1346,7 +1372,7 @@ void Assembler::stc(Coprocessor coproc,
                     int option,
                     LFlag l,
                     Condition cond) {
-  // unindexed addressing
+  // Unindexed addressing.
   ASSERT(is_uint8(option));
   emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
        coproc*B8 | (option & 255));
@@ -1464,7 +1490,7 @@ void Assembler::vcvt(const DwVfpRegister dst,
                      const Condition cond) {
   // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
   // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
+  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
@@ -1571,14 +1597,14 @@ void Assembler::vmrs(Register dst, Condition cond) {
 }
-// Pseudo instructions
+// Pseudo instructions.
 void Assembler::lea(Register dst,
                     const MemOperand& x,
                     SBit s,
                     Condition cond) {
   int am = x.am_;
   if (!x.rm_.is_valid()) {
-    // immediate offset
+    // Immediate offset.
     if ((am & P) == 0)  // post indexing
       mov(dst, Operand(x.rn_), s, cond);
     else if ((am & U) == 0)  // negative indexing
@@ -1612,7 +1638,7 @@ void Assembler::BlockConstPoolFor(int instructions) {
 }
-// Debugging
+// Debugging.
 void Assembler::RecordJSReturn() {
   WriteRecordedPositions();
   CheckBuffer();
@@ -1665,7 +1691,7 @@ void Assembler::WriteRecordedPositions() {
 void Assembler::GrowBuffer() {
   if (!own_buffer_) FATAL("external code buffer is too small");
-  // compute new buffer size
+  // Compute new buffer size.
   CodeDesc desc;  // the new buffer
   if (buffer_size_ < 4*KB) {
     desc.buffer_size = 4*KB;
@@ -1676,20 +1702,20 @@ void Assembler::GrowBuffer() {
   }
   CHECK_GT(desc.buffer_size, 0);  // no overflow
-  // setup new buffer
+  // Setup new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
   desc.instr_size = pc_offset();
   desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-  // copy the data
+  // Copy the data.
   int pc_delta = desc.buffer - buffer_;
   int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
   memmove(desc.buffer, buffer_, desc.instr_size);
   memmove(reloc_info_writer.pos() + rc_delta,
           reloc_info_writer.pos(), desc.reloc_size);
-  // switch buffers
+  // Switch buffers.
   DeleteArray(buffer_);
   buffer_ = desc.buffer;
   buffer_size_ = desc.buffer_size;
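The two memmove calls copy blocks that live at opposite ends of the buffer: instructions keep their offset from the start, while the relocation info, which grows downward from the end, must shift by the difference in buffer sizes (rc_delta). A standalone toy illustrating the two deltas, independent of V8's types:

#include <cstdio>
#include <cstdlib>
#include <cstring>

int main() {
  const int old_size = 16, new_size = 32;
  unsigned char* old_buf = (unsigned char*)calloc(old_size, 1);
  memcpy(old_buf, "CODE", 4);                 // instructions at the front
  memcpy(old_buf + old_size - 4, "RELO", 4);  // reloc info at the back
  unsigned char* new_buf = (unsigned char*)calloc(new_size, 1);
  int rc_delta = new_size - old_size;  // back block moves toward the new end
  memmove(new_buf, old_buf, 4);
  memmove(new_buf + (old_size - 4) + rc_delta, old_buf + old_size - 4, 4);
  printf("%.4s ... %.4s\n", new_buf, new_buf + new_size - 4);
  free(old_buf);
  free(new_buf);
  return 0;
}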
@@ -1697,11 +1723,11 @@ void Assembler::GrowBuffer() {
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
-  // none of our relocation types are pc relative pointing outside the code
+  // None of our relocation types are pc relative pointing outside the code
   // buffer nor pc absolute pointing inside the code buffer, so there is no need
-  // to relocate any emitted relocation entries
+  // to relocate any emitted relocation entries.
-  // relocate pending relocation entries
+  // Relocate pending relocation entries.
   for (int i = 0; i < num_prinfo_; i++) {
     RelocInfo& rinfo = prinfo_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1716,16 +1742,16 @@ void Assembler::GrowBuffer() {
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
-    // Adjust code for new modes
+    // Adjust code for new modes.
     ASSERT(RelocInfo::IsJSReturn(rmode)
            || RelocInfo::IsComment(rmode)
            || RelocInfo::IsPosition(rmode));
-    // these modes do not need an entry in the constant pool
+    // These modes do not need an entry in the constant pool.
   } else {
     ASSERT(num_prinfo_ < kMaxNumPRInfo);
     prinfo_[num_prinfo_++] = rinfo;
     // Make sure the constant pool is not emitted in place of the next
-    // instruction for which we just recorded relocation info
+    // instruction for which we just recorded relocation info.
     BlockConstPoolBefore(pc_offset() + kInstrSize);
   }
   if (rinfo.rmode() != RelocInfo::NONE) {
@@ -1752,7 +1778,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   // blocked for a specific range.
   next_buffer_check_ = pc_offset() + kCheckConstInterval;
-  // There is nothing to do if there are no pending relocation info entries
+  // There is nothing to do if there are no pending relocation info entries.
   if (num_prinfo_ == 0) return;
   // We emit a constant pool at regular intervals of about kDistBetweenPools
@@ -1778,10 +1804,11 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   // no_const_pool_before_, which is checked here. Also, recursive calls to
   // CheckConstPool are blocked by no_const_pool_before_.
   if (pc_offset() < no_const_pool_before_) {
-    // Emission is currently blocked; make sure we try again as soon as possible
+    // Emission is currently blocked; make sure we try again as soon as
+    // possible.
     next_buffer_check_ = no_const_pool_before_;
-    // Something is wrong if emission is forced and blocked at the same time
+    // Something is wrong if emission is forced and blocked at the same time.
     ASSERT(!force_emit);
     return;
   }
@@ -1795,23 +1822,23 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
       jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
   while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
-  // Block recursive calls to CheckConstPool
+  // Block recursive calls to CheckConstPool.
   BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                        num_prinfo_*kInstrSize);
   // Don't bother to check for the emit calls below.
   next_buffer_check_ = no_const_pool_before_;
-  // Emit jump over constant pool if necessary
+  // Emit jump over constant pool if necessary.
   Label after_pool;
   if (require_jump) b(&after_pool);
   RecordComment("[ Constant Pool");
-  // Put down constant pool marker
-  // "Undefined instruction" as specified by A3.1 Instruction set encoding
+  // Put down constant pool marker "Undefined instruction" as specified by
+  // A3.1 Instruction set encoding.
   emit(0x03000000 | num_prinfo_);
-  // Emit constant pool entries
+  // Emit constant pool entries.
   for (int i = 0; i < num_prinfo_; i++) {
     RelocInfo& rinfo = prinfo_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1819,8 +1846,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
            rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
     Instr instr = instr_at(rinfo.pc());
-    // Instruction to patch must be a ldr/str [pc, #offset]
-    // P and U set, B and W clear, Rn == pc, offset12 still 0
+    // Instruction to patch must be a ldr/str [pc, #offset].
+    // P and U set, B and W clear, Rn == pc, offset12 still 0.
     ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
            (2*B25 | P | U | pc.code()*B16));
     int delta = pc_ - rinfo.pc() - 8;
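The patch step fills in the 12-bit offset of a pending "ldr rd, [pc, #0]" once the pool position is known; the -8 accounts for the ARM pc reading two instructions ahead. A standalone sketch of that arithmetic (the addresses are made up for the example):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t ldr_pc = 0xE59F0000;  // ldr r0, [pc, #0], offset_12 still 0
  int instr_addr = 0x100;        // where the ldr was emitted
  int pool_entry = 0x140;        // where its constant lands in the pool
  int delta = pool_entry - instr_addr - 8;  // pc is 8 bytes ahead on ARM
  uint32_t patched = (ldr_pc & ~0xFFFu) | (uint32_t)delta;
  printf("patched: 0x%08X (offset %d)\n", patched, delta);  // 0xE59F0038
  return 0;
}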

9 deps/v8/src/arm/assembler-arm.h
@@ -80,7 +80,7 @@ struct Register {
     return 1 << code_;
   }
-  // (unfortunately we can't make this private in a struct)
+  // Unfortunately we can't make this private in a struct.
   int code_;
 };
@@ -205,7 +205,7 @@ struct CRegister {
     return 1 << code_;
   }
-  // (unfortunately we can't make this private in a struct)
+  // Unfortunately we can't make this private in a struct.
   int code_;
 };
@@ -250,7 +250,7 @@ enum Coprocessor {
 };
-// Condition field in instructions
+// Condition field in instructions.
 enum Condition {
   eq = 0 << 28,  // Z set      equal.
   ne = 1 << 28,  // Z clear    not equal.
@@ -628,6 +628,9 @@ class Assembler : public Malloced {
   void blx(Label* L) { blx(branch_offset(L, false)); }  // v5 and above
   // Data-processing instructions
+  void ubfx(Register dst, Register src1, const Operand& src2,
+            const Operand& src3, Condition cond = al);
+
   void and_(Register dst, Register src1, const Operand& src2,
             SBit s = LeaveCC, Condition cond = al);
30 deps/v8/src/arm/assembler-thumb2-inl.h
@@ -174,20 +174,6 @@ Operand::Operand(const ExternalReference& f) {
 }
-Operand::Operand(Object** opp) {
-  rm_ = no_reg;
-  imm32_ = reinterpret_cast<int32_t>(opp);
-  rmode_ = RelocInfo::NONE;
-}
-
-Operand::Operand(Context** cpp) {
-  rm_ = no_reg;
-  imm32_ = reinterpret_cast<int32_t>(cpp);
-  rmode_ = RelocInfo::NONE;
-}
-
 Operand::Operand(Smi* value) {
   rm_ = no_reg;
   imm32_ = reinterpret_cast<intptr_t>(value);
@@ -229,14 +215,24 @@ void Assembler::emit(Instr x) {
 Address Assembler::target_address_address_at(Address pc) {
-  Instr instr = Memory::int32_at(pc);
-  // Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
+  Address target_pc = pc;
+  Instr instr = Memory::int32_at(target_pc);
+  // If we have a bx instruction, the instruction before the bx is
+  // what we need to patch.
+  static const int32_t kBxInstMask = 0x0ffffff0;
+  static const int32_t kBxInstPattern = 0x012fff10;
+  if ((instr & kBxInstMask) == kBxInstPattern) {
+    target_pc -= kInstrSize;
+    instr = Memory::int32_at(target_pc);
+  }
+  // Verify that the instruction to patch is a
+  // ldr<cond> <Rd>, [pc +/- offset_12].
   ASSERT((instr & 0x0f7f0000) == 0x051f0000);
   int offset = instr & 0xfff;  // offset_12 is unsigned
   if ((instr & (1 << 23)) == 0) offset = -offset;  // U bit defines offset sign
   // Verify that the constant pool comes after the instruction referencing it.
   ASSERT(offset >= -4);
-  return pc + offset + 8;
+  return target_pc + offset + 8;
 }
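In other words: starting from the call-site pc, the code steps back over a trailing bx if one is present, then decodes the pc-relative ldr to find the constant-pool slot holding the call target. A standalone model of the same walk, using plain pointers instead of V8's Address/Memory helpers:

#include <cstdint>
#include <cstdio>

int32_t* TargetSlot(int32_t* pc) {
  const int kInstrSize = 4;
  int32_t* target_pc = pc;
  int32_t instr = *target_pc;
  if ((instr & 0x0ffffff0) == 0x012fff10) {  // bx <reg>: patch the ldr before it
    target_pc = (int32_t*)((char*)target_pc - kInstrSize);
    instr = *target_pc;
  }
  int offset = instr & 0xfff;                      // offset_12 (unsigned)
  if ((instr & (1 << 23)) == 0) offset = -offset;  // U bit gives the sign
  return (int32_t*)((char*)target_pc + offset + 8);
}

int main() {
  // ldr r0, [pc, #0] at slot 0; with pc reading 8 bytes ahead, the slot is 2.
  int32_t code[4] = { (int32_t)0xE59F0000, 0, (int32_t)0xDEADBEEF, 0 };
  printf("slot: %d\n", (int)(TargetSlot(&code[0]) - code));  // 2
  return 0;
}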

227 deps/v8/src/arm/assembler-thumb2.cc
@@ -30,9 +30,9 @@
 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 // OF THE POSSIBILITY OF SUCH DAMAGE.
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
 #include "v8.h"
@@ -51,9 +51,14 @@ void CpuFeatures::Probe() {
   // If the compiler is allowed to use vfp then we can use vfp too in our
   // code generation.
 #if !defined(__arm__)
-  // For the simulator=arm build, always use VFP since the arm simulator has
-  // VFP support.
-  supported_ |= 1u << VFP3;
+  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
+  if (FLAG_enable_vfp3) {
+    supported_ |= 1u << VFP3;
+  }
+  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled.
+  if (FLAG_enable_armv7) {
+    supported_ |= 1u << ARMv7;
+  }
 #else
   if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
@@ -66,6 +71,11 @@ void CpuFeatures::Probe() {
     supported_ |= 1u << VFP3;
     found_by_runtime_probing_ |= 1u << VFP3;
   }
+  if (OS::ArmCpuHasFeature(ARMv7)) {
+    supported_ |= 1u << ARMv7;
+    found_by_runtime_probing_ |= 1u << ARMv7;
+  }
 #endif
 }
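Probe() only records availability; emitters still test the bit before using an optional instruction (the vldr/vstr bodies later in this file assert IsEnabled(VFP3), for example). A standalone mock of the probe/guard split, with illustrative booleans standing in for FLAG_enable_vfp3 and FLAG_enable_armv7:

#include <cstdio>

enum CpuFeature { VFP3 = 0, ARMv7 = 1 };
static unsigned supported_ = 0;

static void Probe(bool enable_vfp3, bool enable_armv7) {
  if (enable_vfp3) supported_ |= 1u << VFP3;    // FLAG_enable_vfp3 analogue
  if (enable_armv7) supported_ |= 1u << ARMv7;  // FLAG_enable_armv7 analogue
}

static bool IsSupported(CpuFeature f) { return (supported_ & (1u << f)) != 0; }

int main() {
  Probe(true, false);
  printf("VFP3=%d ARMv7=%d\n", IsSupported(VFP3), IsSupported(ARMv7));
  return 0;
}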
@@ -83,9 +93,9 @@ Register r4 = { 4 };
 Register r5 = { 5 };
 Register r6 = { 6 };
 Register r7 = { 7 };
-Register r8 = { 8 };
+Register r8 = { 8 };  // Used as context register.
 Register r9 = { 9 };
-Register r10 = { 10 };
+Register r10 = { 10 };  // Used as roots register.
 Register fp = { 11 };
 Register ip = { 12 };
 Register sp = { 13 };
@@ -264,9 +274,9 @@ MemOperand::MemOperand(Register rn, Register rm,
 // -----------------------------------------------------------------------------
-// Implementation of Assembler
+// Implementation of Assembler.
-// Instruction encoding bits
+// Instruction encoding bits.
 enum {
   H = 1 << 5,   // halfword (or byte)
   S6 = 1 << 6,  // signed (or unsigned)
@@ -299,14 +309,14 @@ enum {
   B26 = 1 << 26,
   B27 = 1 << 27,
-  // Instruction bit masks
+  // Instruction bit masks.
   RdMask = 15 << 12,  // in str instruction
   CondMask = 15 << 28,
   CoprocessorMask = 15 << 8,
   OpCodeMask = 15 << 21,  // in data-processing instructions
   Imm24Mask = (1 << 24) - 1,
   Off12Mask = (1 << 12) - 1,
-  // Reserved condition
+  // Reserved condition.
   nv = 15 << 28
 };
@@ -327,13 +337,13 @@ const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
 // ldr pc, [pc, #XXX]
 const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
-// spare_buffer_
+// Spare buffer.
 static const int kMinimalBufferSize = 4*KB;
 static byte* spare_buffer_ = NULL;
 Assembler::Assembler(void* buffer, int buffer_size) {
   if (buffer == NULL) {
-    // do our own buffer management
+    // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
       buffer_size = kMinimalBufferSize;
@@ -351,14 +361,14 @@ Assembler::Assembler(void* buffer, int buffer_size) {
     own_buffer_ = true;
   } else {
-    // use externally provided buffer instead
+    // Use externally provided buffer instead.
     ASSERT(buffer_size > 0);
     buffer_ = static_cast<byte*>(buffer);
     buffer_size_ = buffer_size;
     own_buffer_ = false;
   }
-  // setup buffer pointers
+  // Setup buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -386,11 +396,11 @@ Assembler::~Assembler() {
 void Assembler::GetCode(CodeDesc* desc) {
-  // emit constant pool if necessary
+  // Emit constant pool if necessary.
   CheckConstPool(true, false);
   ASSERT(num_prinfo_ == 0);
-  // setup desc
+  // Setup code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -539,7 +549,7 @@ void Assembler::bind_to(Label* L, int pos) {
 void Assembler::link_to(Label* L, Label* appendix) {
   if (appendix->is_linked()) {
     if (L->is_linked()) {
-      // append appendix to L's list
+      // Append appendix to L's list.
       int fixup_pos;
       int link = L->pos();
       do {
@@ -549,7 +559,7 @@ void Assembler::link_to(Label* L, Label* appendix) {
       ASSERT(link == kEndOfChain);
       target_at_put(fixup_pos, appendix->pos());
     } else {
-      // L is empty, simply use appendix
+      // L is empty, simply use appendix.
       *L = *appendix;
     }
   }
@@ -575,12 +585,12 @@ void Assembler::next(Label* L) {
 }
-// Low-level code emission routines depending on the addressing mode
+// Low-level code emission routines depending on the addressing mode.
 static bool fits_shifter(uint32_t imm32,
                          uint32_t* rotate_imm,
                          uint32_t* immed_8,
                          Instr* instr) {
-  // imm32 must be unsigned
+  // imm32 must be unsigned.
   for (int rot = 0; rot < 16; rot++) {
     uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
     if ((imm8 <= 0xff)) {
@@ -589,7 +599,7 @@ static bool fits_shifter(uint32_t imm32,
       return true;
     }
  }
-  // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
+  // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
  if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
    if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
      *instr ^= 0x2*B21;
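The loop searches for a rotation that squeezes imm32 into 8 bits: an ARM shifter immediate is an 8-bit value rotated right by 2*rotate_imm. A standalone re-implementation with a worked example (the rot == 0 case is handled separately here to avoid a shift by 32, which C++ leaves undefined):

#include <cstdint>
#include <cstdio>

bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8) {
  for (uint32_t rot = 0; rot < 16; rot++) {
    // Rotating left by 2*rot undoes a right-rotation of 2*rot.
    uint32_t imm8 = (rot == 0) ? imm32
                               : (imm32 << 2 * rot) | (imm32 >> (32 - 2 * rot));
    if (imm8 <= 0xff) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  return false;
}

int main() {
  uint32_t rot, imm8;
  if (FitsShifter(0x0000FF00, &rot, &imm8))
    printf("0x0000FF00 = 0x%02X ror %u\n", imm8, 2 * rot);  // 0xFF ror 24
  if (!FitsShifter(0x12345678, &rot, &imm8))
    printf("0x12345678 does not fit; needs ip or the constant pool\n");
  return 0;
}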
@@ -626,7 +636,7 @@ void Assembler::addrmod1(Instr instr,
   CheckBuffer();
   ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
   if (!x.rm_.is_valid()) {
-    // immediate
+    // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
     if (MustUseIp(x.rmode_) ||
@@ -634,7 +644,7 @@ void Assembler::addrmod1(Instr instr,
       // The immediate operand cannot be encoded as a shifter operand, so load
       // it first to register ip and change the original instruction to use ip.
       // However, if the original instruction is a 'mov rd, x' (not setting the
-      // condition code), then replace it with a 'ldr rd, [pc]'
+      // condition code), then replace it with a 'ldr rd, [pc]'.
       RecordRelocInfo(x.rmode_, x.imm32_);
       CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
       Condition cond = static_cast<Condition>(instr & CondMask);
@@ -648,16 +658,16 @@ void Assembler::addrmod1(Instr instr,
     }
     instr |= I | rotate_imm*B8 | immed_8;
   } else if (!x.rs_.is_valid()) {
-    // immediate shift
+    // Immediate shift.
     instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
   } else {
-    // register shift
+    // Register shift.
     ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
     instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
   }
   emit(instr | rn.code()*B16 | rd.code()*B12);
   if (rn.is(pc) || x.rm_.is(pc))
-    // block constant pool emission for one instruction after reading pc
+    // Block constant pool emission for one instruction after reading pc.
     BlockConstPoolBefore(pc_offset() + kInstrSize);
 }
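So an operand-1 immediate ends up in one of three places: encoded inline when fits_shifter succeeds, materialized through a pc-relative load into the destination when the instruction is a plain mov, or staged through ip otherwise. A trivial standalone decision table restating those branches:

#include <cstdio>

// "fits" stands for the fits_shifter test above; is_plain_mov means a mov
// that does not set the condition code, which may be rewritten directly.
const char* Operand1Strategy(bool fits, bool is_plain_mov) {
  if (fits) return "encode inline as rotated 8-bit immediate";
  if (is_plain_mov) return "rewrite as ldr rd, [pc, #offset] (constant pool)";
  return "ldr ip, [pc, #offset], then use ip as the register operand";
}

int main() {
  printf("%s\n", Operand1Strategy(true, false));
  printf("%s\n", Operand1Strategy(false, true));
  printf("%s\n", Operand1Strategy(false, false));
  return 0;
}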
@@ -666,15 +676,15 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
   ASSERT((instr & ~(CondMask | B | L)) == B26);
   int am = x.am_;
   if (!x.rm_.is_valid()) {
-    // immediate offset
+    // Immediate offset.
     int offset_12 = x.offset_;
     if (offset_12 < 0) {
       offset_12 = -offset_12;
       am ^= U;
     }
     if (!is_uint12(offset_12)) {
-      // immediate offset cannot be encoded, load it first to register ip
-      // rn (and rd in a load) should never be ip, or will be trashed
+      // Immediate offset cannot be encoded, load it first to register ip
+      // rn (and rd in a load) should never be ip, or will be trashed.
       ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
       mov(ip, Operand(x.offset_), LeaveCC,
           static_cast<Condition>(instr & CondMask));
@@ -684,9 +694,9 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
     ASSERT(offset_12 >= 0);  // no masking needed
     instr |= offset_12;
   } else {
-    // register offset (shift_imm_ and shift_op_ are 0) or scaled
-    // register offset the constructors make sure than both shift_imm_
-    // and shift_op_ are initialized
+    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
+    // register offset the constructors make sure than both shift_imm_
+    // and shift_op_ are initialized.
     ASSERT(!x.rm_.is(pc));
     instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
   }
@@ -700,15 +710,15 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
   ASSERT(x.rn_.is_valid());
   int am = x.am_;
   if (!x.rm_.is_valid()) {
-    // immediate offset
+    // Immediate offset.
     int offset_8 = x.offset_;
     if (offset_8 < 0) {
       offset_8 = -offset_8;
       am ^= U;
     }
     if (!is_uint8(offset_8)) {
-      // immediate offset cannot be encoded, load it first to register ip
-      // rn (and rd in a load) should never be ip, or will be trashed
+      // Immediate offset cannot be encoded, load it first to register ip
+      // rn (and rd in a load) should never be ip, or will be trashed.
       ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
       mov(ip, Operand(x.offset_), LeaveCC,
           static_cast<Condition>(instr & CondMask));
@@ -718,15 +728,15 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
     ASSERT(offset_8 >= 0);  // no masking needed
     instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
   } else if (x.shift_imm_ != 0) {
-    // scaled register offset not supported, load index first
-    // rn (and rd in a load) should never be ip, or will be trashed
+    // Scaled register offset not supported, load index first
+    // rn (and rd in a load) should never be ip, or will be trashed.
     ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
     mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
         static_cast<Condition>(instr & CondMask));
     addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
     return;
   } else {
-    // register offset
+    // Register offset.
     ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
     instr |= x.rm_.code();
   }
@@ -744,7 +754,7 @@ void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
-  // unindexed addressing is not encoded by this function
+  // Unindexed addressing is not encoded by this function.
   ASSERT_EQ((B27 | B26),
             (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
   ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
@@ -759,7 +769,7 @@ void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
   ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
   ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
-  // post-indexed addressing requires W == 1; different than in addrmod2/3
+  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
   if ((am & P) == 0)
     am |= W;
@@ -782,7 +792,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
   }
   // Block the emission of the constant pool, since the branch instruction must
-  // be emitted at the pc offset recorded by the label
+  // be emitted at the pc offset recorded by the label.
   BlockConstPoolBefore(pc_offset() + kInstrSize);
   return target_pos - (pc_offset() + kPcLoadDelta);
 }
@@ -804,7 +814,7 @@ void Assembler::label_at_put(Label* L, int at_offset) {
 }
-// Branch instructions
+// Branch instructions.
 void Assembler::b(int branch_offset, Condition cond) {
   ASSERT((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
@@ -812,7 +822,7 @@ void Assembler::b(int branch_offset, Condition cond) {
   emit(cond | B27 | B25 | (imm24 & Imm24Mask));
   if (cond == al)
-    // dead code is a good location to emit the constant pool
+    // Dead code is a good location to emit the constant pool.
     CheckConstPool(false, false);
 }
@@ -849,7 +859,22 @@ void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
 }
-// Data-processing instructions
+// Data-processing instructions.
+
+// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
+// Instruction details available in ARM DDI 0406A, A8-464.
+// cond(31-28) | 01111(27-23) | 1(22) | 1(21) | widthm1(20-16) |
+// Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
+void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
+                     const Operand& src3, Condition cond) {
+  ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
+  ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
+  ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
+  emit(cond | 0x3F*B21 | src3.imm32_*B16 |
+       dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
+}
+
 void Assembler::and_(Register dst, Register src1, const Operand& src2,
                      SBit s, Condition cond) {
   addrmod1(cond | 0*B21 | s, src1, dst, src2);
@@ -886,7 +911,7 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
   if (FLAG_push_pop_elimination &&
       last_bound_pos_ <= (pc_offset() - pattern_size) &&
       reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // pattern
+      // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
       (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
     pc_ -= 2 * kInstrSize;
@@ -960,7 +985,7 @@ void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
 }
-// Multiply instructions
+// Multiply instructions.
 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                     SBit s, Condition cond) {
   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
@@ -1029,7 +1054,7 @@ void Assembler::umull(Register dstL,
 }
-// Miscellaneous arithmetic instructions
+// Miscellaneous arithmetic instructions.
 void Assembler::clz(Register dst, Register src, Condition cond) {
   // v5 and above.
   ASSERT(!dst.is(pc) && !src.is(pc));
@@ -1038,7 +1063,7 @@ void Assembler::clz(Register dst, Register src, Condition cond) {
 }
-// Status register access instructions
+// Status register access instructions.
 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
   ASSERT(!dst.is(pc));
   emit(cond | B24 | s | 15*B16 | dst.code()*B12);
@@ -1050,12 +1075,12 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
   ASSERT(fields >= B16 && fields < B20);  // at least one field set
   Instr instr;
   if (!src.rm_.is_valid()) {
-    // immediate
+    // Immediate.
     uint32_t rotate_imm;
     uint32_t immed_8;
     if (MustUseIp(src.rmode_) ||
         !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
-      // immediate operand cannot be encoded, load it first to register ip
+      // Immediate operand cannot be encoded, load it first to register ip.
       RecordRelocInfo(src.rmode_, src.imm32_);
       ldr(ip, MemOperand(pc, 0), cond);
       msr(fields, Operand(ip), cond);
@@ -1070,7 +1095,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
 }
-// Load/Store instructions
+// Load/Store instructions.
 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
   if (dst.is(pc)) {
     WriteRecordedPositions();
@@ -1085,7 +1110,7 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
   if (FLAG_push_pop_elimination &&
       last_bound_pos_ <= (pc_offset() - pattern_size) &&
       reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // pattern
+      // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
       instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
     pc_ -= 2 * kInstrSize;
@@ -1106,6 +1131,7 @@ void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
   if (FLAG_push_pop_elimination &&
       last_bound_pos_ <= (pc_offset() - pattern_size) &&
       reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+      // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
       instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
     pc_ -= 2 * kInstrSize;
@@ -1147,17 +1173,17 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
 }
-// Load/Store multiple instructions
+// Load/Store multiple instructions.
 void Assembler::ldm(BlockAddrMode am,
                     Register base,
                     RegList dst,
                     Condition cond) {
-  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable
+  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
   ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
   addrmod4(cond | B27 | am | L, base, dst);
-  // emit the constant pool after a function return implemented by ldm ..{..pc}
+  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
   if (cond == al && (dst & pc.bit()) != 0) {
     // There is a slight chance that the ldm instruction was actually a call,
     // in which case it would be wrong to return into the constant pool; we
@@ -1177,7 +1203,7 @@ void Assembler::stm(BlockAddrMode am,
 }
-// Semaphore instructions
+// Semaphore instructions.
 void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
   ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
   ASSERT(!dst.is(base) && !src.is(base));
@@ -1197,7 +1223,7 @@ void Assembler::swpb(Register dst,
 }
-// Exception-generating instructions and debugging support
+// Exception-generating instructions and debugging support.
 void Assembler::stop(const char* msg) {
 #if !defined(__arm__)
   // The simulator handles these special instructions and stops execution.
@@ -1222,7 +1248,7 @@ void Assembler::swi(uint32_t imm24, Condition cond) {
 }
-// Coprocessor instructions
+// Coprocessor instructions.
 void Assembler::cdp(Coprocessor coproc,
                     int opcode_1,
                     CRegister crd,
@@ -1307,7 +1333,7 @@ void Assembler::ldc(Coprocessor coproc,
                     int option,
                     LFlag l,
                     Condition cond) {
-  // unindexed addressing
+  // Unindexed addressing.
   ASSERT(is_uint8(option));
   emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
        coproc*B8 | (option & 255));
@@ -1346,7 +1372,7 @@ void Assembler::stc(Coprocessor coproc,
                     int option,
                     LFlag l,
                     Condition cond) {
-  // unindexed addressing
+  // Unindexed addressing.
   ASSERT(is_uint8(option));
   emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
        coproc*B8 | (option & 255));
@@ -1371,6 +1397,36 @@ void Assembler::stc2(Coprocessor coproc,
 // Support for VFP.
+
+void Assembler::vldr(const DwVfpRegister dst,
+                     const Register base,
+                     int offset,
+                     const Condition cond) {
+  // Ddst = MEM(Rbase + offset).
+  // Instruction details available in ARM DDI 0406A, A8-628.
+  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
+  // Vdst(15-12) | 1011(11-8) | offset
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(offset % 4 == 0);
+  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+       0xB*B8 | ((offset / 4) & 255));
+}
+
+void Assembler::vstr(const DwVfpRegister src,
+                     const Register base,
+                     int offset,
+                     const Condition cond) {
+  // MEM(Rbase + offset) = Dsrc.
+  // Instruction details available in ARM DDI 0406A, A8-786.
+  // cond(31-28) | 1101(27-24)| 1000(23-20) | Rbase(19-16) |
+  // Vsrc(15-12) | 1011(11-8) | (offset/4)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(offset % 4 == 0);
+  emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
+       0xB*B8 | ((offset / 4) & 255));
+}
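As with ubfx, the new encodings can be spot-checked against the bit-layout comments. A standalone sketch (not V8 code) that hand-assembles vldr d0, [r1, #8] from the fields above; the expected word is my own calculation:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t al = 0xE0000000;  // "always" condition
  uint32_t base = 1, dst = 0;      // r1, d0
  int offset = 8;                  // must be a multiple of 4
  uint32_t instr = al | (0xD9u << 20) | (base << 16) | (dst << 12) |
                   (0xBu << 8) | (uint32_t)((offset / 4) & 255);
  printf("vldr d0, [r1, #8] -> 0x%08X\n", instr);  // 0xED910B02
  return 0;
}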
 void Assembler::vmov(const DwVfpRegister dst,
                      const Register src1,
                      const Register src2,
@@ -1434,7 +1490,7 @@ void Assembler::vcvt(const DwVfpRegister dst,
                      const Condition cond) {
   // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
   // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
+  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
   ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
@@ -1541,14 +1597,14 @@ void Assembler::vmrs(Register dst, Condition cond) {
 }
-// Pseudo instructions
+// Pseudo instructions.
 void Assembler::lea(Register dst,
                     const MemOperand& x,
                     SBit s,
                     Condition cond) {
   int am = x.am_;
   if (!x.rm_.is_valid()) {
-    // immediate offset
+    // Immediate offset.
     if ((am & P) == 0)  // post indexing
       mov(dst, Operand(x.rn_), s, cond);
     else if ((am & U) == 0)  // negative indexing
@@ -1582,7 +1638,7 @@ void Assembler::BlockConstPoolFor(int instructions) {
 }
-// Debugging
+// Debugging.
 void Assembler::RecordJSReturn() {
   WriteRecordedPositions();
   CheckBuffer();
@@ -1635,7 +1691,7 @@ void Assembler::WriteRecordedPositions() {
 void Assembler::GrowBuffer() {
   if (!own_buffer_) FATAL("external code buffer is too small");
-  // compute new buffer size
+  // Compute new buffer size.
   CodeDesc desc;  // the new buffer
   if (buffer_size_ < 4*KB) {
     desc.buffer_size = 4*KB;
@@ -1646,20 +1702,20 @@ void Assembler::GrowBuffer() {
   }
   CHECK_GT(desc.buffer_size, 0);  // no overflow
-  // setup new buffer
+  // Setup new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
   desc.instr_size = pc_offset();
   desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-  // copy the data
+  // Copy the data.
   int pc_delta = desc.buffer - buffer_;
   int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
   memmove(desc.buffer, buffer_, desc.instr_size);
   memmove(reloc_info_writer.pos() + rc_delta,
           reloc_info_writer.pos(), desc.reloc_size);
-  // switch buffers
+  // Switch buffers.
   DeleteArray(buffer_);
   buffer_ = desc.buffer;
   buffer_size_ = desc.buffer_size;
@@ -1667,11 +1723,11 @@ void Assembler::GrowBuffer() {
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
-  // none of our relocation types are pc relative pointing outside the code
+  // None of our relocation types are pc relative pointing outside the code
   // buffer nor pc absolute pointing inside the code buffer, so there is no need
-  // to relocate any emitted relocation entries
+  // to relocate any emitted relocation entries.
-  // relocate pending relocation entries
+  // Relocate pending relocation entries.
   for (int i = 0; i < num_prinfo_; i++) {
     RelocInfo& rinfo = prinfo_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1686,16 +1742,16 @@ void Assembler::GrowBuffer() {
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
-    // Adjust code for new modes
+    // Adjust code for new modes.
     ASSERT(RelocInfo::IsJSReturn(rmode)
            || RelocInfo::IsComment(rmode)
            || RelocInfo::IsPosition(rmode));
-    // these modes do not need an entry in the constant pool
+    // These modes do not need an entry in the constant pool.
   } else {
     ASSERT(num_prinfo_ < kMaxNumPRInfo);
     prinfo_[num_prinfo_++] = rinfo;
     // Make sure the constant pool is not emitted in place of the next
-    // instruction for which we just recorded relocation info
+    // instruction for which we just recorded relocation info.
     BlockConstPoolBefore(pc_offset() + kInstrSize);
   }
   if (rinfo.rmode() != RelocInfo::NONE) {
@@ -1722,7 +1778,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   // blocked for a specific range.
   next_buffer_check_ = pc_offset() + kCheckConstInterval;
-  // There is nothing to do if there are no pending relocation info entries
+  // There is nothing to do if there are no pending relocation info entries.
   if (num_prinfo_ == 0) return;
   // We emit a constant pool at regular intervals of about kDistBetweenPools
@@ -1748,10 +1804,11 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   // no_const_pool_before_, which is checked here. Also, recursive calls to
   // CheckConstPool are blocked by no_const_pool_before_.
   if (pc_offset() < no_const_pool_before_) {
-    // Emission is currently blocked; make sure we try again as soon as possible
+    // Emission is currently blocked; make sure we try again as soon as
+    // possible.
     next_buffer_check_ = no_const_pool_before_;
-    // Something is wrong if emission is forced and blocked at the same time
+    // Something is wrong if emission is forced and blocked at the same time.
     ASSERT(!force_emit);
     return;
   }
@@ -1765,23 +1822,23 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
       jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
   while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
-  // Block recursive calls to CheckConstPool
+  // Block recursive calls to CheckConstPool.
   BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                        num_prinfo_*kInstrSize);
   // Don't bother to check for the emit calls below.
   next_buffer_check_ = no_const_pool_before_;
-  // Emit jump over constant pool if necessary
+  // Emit jump over constant pool if necessary.
   Label after_pool;
   if (require_jump) b(&after_pool);
   RecordComment("[ Constant Pool");
-  // Put down constant pool marker
-  // "Undefined instruction" as specified by A3.1 Instruction set encoding
+  // Put down constant pool marker "Undefined instruction" as specified by
+  // A3.1 Instruction set encoding.
   emit(0x03000000 | num_prinfo_);
-  // Emit constant pool entries
+  // Emit constant pool entries.
   for (int i = 0; i < num_prinfo_; i++) {
     RelocInfo& rinfo = prinfo_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -1789,8 +1846,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
            rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
     Instr instr = instr_at(rinfo.pc());
-    // Instruction to patch must be a ldr/str [pc, #offset]
-    // P and U set, B and W clear, Rn == pc, offset12 still 0
+    // Instruction to patch must be a ldr/str [pc, #offset].
+    // P and U set, B and W clear, Rn == pc, offset12 still 0.
     ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
            (2*B25 | P | U | pc.code()*B16));
     int delta = pc_ - rinfo.pc() - 8;

25 deps/v8/src/arm/assembler-thumb2.h
@@ -30,9 +30,9 @@
 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 // OF THE POSSIBILITY OF SUCH DAMAGE.

-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.

 // A light-weight ARM Assembler
 // Generates user mode instructions for the ARM architecture up to version 5
@@ -80,7 +80,7 @@ struct Register {
     return 1 << code_;
   }

-  // (unfortunately we can't make this private in a struct)
+  // Unfortunately we can't make this private in a struct.
   int code_;
 };
@@ -205,7 +205,7 @@ struct CRegister {
     return 1 << code_;
   }

-  // (unfortunately we can't make this private in a struct)
+  // Unfortunately we can't make this private in a struct.
   int code_;
 };
@@ -250,7 +250,7 @@ enum Coprocessor {
 };

-// Condition field in instructions
+// Condition field in instructions.
 enum Condition {
   eq =  0 << 28,  // Z set            equal.
   ne =  1 << 28,  // Z clear          not equal.
@@ -398,8 +398,6 @@ class Operand BASE_EMBEDDED {
                  RelocInfo::Mode rmode = RelocInfo::NONE));
   INLINE(explicit Operand(const ExternalReference& f));
   INLINE(explicit Operand(const char* s));
-  INLINE(explicit Operand(Object** opp));
-  INLINE(explicit Operand(Context** cpp));
   explicit Operand(Handle<Object> handle);
   INLINE(explicit Operand(Smi* value));
@@ -630,6 +628,9 @@ class Assembler : public Malloced {
   void blx(Label* L)  { blx(branch_offset(L, false)); }  // v5 and above

   // Data-processing instructions
+  void ubfx(Register dst, Register src1, const Operand& src2,
+            const Operand& src3, Condition cond = al);
+
   void and_(Register dst, Register src1, const Operand& src2,
             SBit s = LeaveCC, Condition cond = al);
@@ -796,6 +797,14 @@ class Assembler : public Malloced {
   // However, some simple modifications can allow
   // these APIs to support D16 to D31.

+  void vldr(const DwVfpRegister dst,
+            const Register base,
+            int offset,  // Offset must be a multiple of 4.
+            const Condition cond = al);
+  void vstr(const DwVfpRegister src,
+            const Register base,
+            int offset,  // Offset must be a multiple of 4.
+            const Condition cond = al);
   void vmov(const DwVfpRegister dst,
             const Register src1,
             const Register src2,
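
(A rough C++ model, for illustration only, of what the newly declared ubfx, unsigned bitfield extract, computes: bits [lsb, lsb + width) of the source operand, zero-extended into the destination. Assumes 0 < width < 32.)

    // Hypothetical sketch of ubfx semantics; not V8 code.
    unsigned Ubfx(unsigned src, unsigned lsb, unsigned width) {
      return (src >> lsb) & ((1u << width) - 1u);
    }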

143
deps/v8/src/arm/builtins-arm.cc

@@ -499,7 +499,10 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
   // r0: number of arguments
   // r1: called object
   __ bind(&non_function_call);
+  // CALL_NON_FUNCTION expects the non-function constructor as receiver
+  // (instead of the original receiver from the call site). The receiver is
+  // stack element argc.
+  __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
   // Set expected number of arguments to zero (not changing r0).
   __ mov(r2, Operand(0));
   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
@@ -904,7 +907,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {

 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 1. Make sure we have at least one argument.
-  // r0: actual number of argument
+  // r0: actual number of arguments
   { Label done;
     __ tst(r0, Operand(r0));
     __ b(ne, &done);
@@ -914,40 +917,31 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     __ bind(&done);
   }

-  // 2. Get the function to call from the stack.
-  // r0: actual number of argument
-  { Label done, non_function, function;
-    __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(eq, &non_function);
-    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-    __ b(eq, &function);
-
-    // Non-function called: Clear the function to force exception.
-    __ bind(&non_function);
-    __ mov(r1, Operand(0));
-    __ b(&done);
-
-    // Change the context eagerly because it will be used below to get the
-    // right global object.
-    __ bind(&function);
-    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
-    __ bind(&done);
-  }
+  // 2. Get the function to call (passed as receiver) from the stack, check
+  //    if it is a function.
+  // r0: actual number of arguments
+  Label non_function;
+  __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &non_function);
+  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+  __ b(ne, &non_function);

-  // 3. Make sure first argument is an object; convert if necessary.
+  // 3a. Patch the first argument if necessary when calling a function.
   // r0: actual number of arguments
   // r1: function
-  { Label call_to_object, use_global_receiver, patch_receiver, done;
+  Label shift_arguments;
+  { Label convert_to_object, use_global_receiver, patch_receiver;
+    // Change context eagerly in case we need the global receiver.
+    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
     __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
     __ ldr(r2, MemOperand(r2, -kPointerSize));
     // r0: actual number of arguments
     // r1: function
     // r2: first argument
     __ tst(r2, Operand(kSmiTagMask));
-    __ b(eq, &call_to_object);
+    __ b(eq, &convert_to_object);
     __ LoadRoot(r3, Heap::kNullValueRootIndex);
     __ cmp(r2, r3);
@@ -957,31 +951,28 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     __ b(eq, &use_global_receiver);

     __ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
-    __ b(lt, &call_to_object);
+    __ b(lt, &convert_to_object);
     __ cmp(r3, Operand(LAST_JS_OBJECT_TYPE));
-    __ b(le, &done);
+    __ b(le, &shift_arguments);

-    __ bind(&call_to_object);
-    __ EnterInternalFrame();
-
-    // Store number of arguments and function across the call into the runtime.
-    __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+    __ bind(&convert_to_object);
+    __ EnterInternalFrame();  // In order to preserve argument count.
+    __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Smi-tagged.
     __ push(r0);
-    __ push(r1);
-
     __ push(r2);
     __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
     __ mov(r2, r0);

-    // Restore number of arguments and function.
-    __ pop(r1);
     __ pop(r0);
     __ mov(r0, Operand(r0, ASR, kSmiTagSize));
     __ LeaveInternalFrame();
-    __ b(&patch_receiver);
+    // Restore the function to r1.
+    __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+    __ jmp(&patch_receiver);

-    // Use the global receiver object from the called function as the receiver.
+    // Use the global receiver object from the called function as the
+    // receiver.
     __ bind(&use_global_receiver);
     const int kGlobalIndex =
         Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
@@ -994,16 +985,30 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     __ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2));
     __ str(r2, MemOperand(r3, -kPointerSize));

-    __ bind(&done);
+    __ jmp(&shift_arguments);
   }

-  // 4. Shift stuff one slot down the stack
-  // r0: actual number of arguments (including call() receiver)
+  // 3b. Patch the first argument when calling a non-function. The
+  //     CALL_NON_FUNCTION builtin expects the non-function callee as
+  //     receiver, so overwrite the first argument which will ultimately
+  //     become the receiver.
+  // r0: actual number of arguments
+  // r1: function
+  __ bind(&non_function);
+  __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
+  __ str(r1, MemOperand(r2, -kPointerSize));
+  // Clear r1 to indicate a non-function being called.
+  __ mov(r1, Operand(0));
+
+  // 4. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver). Adjust argument count to make
+  //    the original first argument the new receiver.
+  // r0: actual number of arguments
   // r1: function
+  __ bind(&shift_arguments);
   { Label loop;
     // Calculate the copy start address (destination). Copy end address is sp.
     __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
-    __ add(r2, r2, Operand(kPointerSize));  // copy receiver too

     __ bind(&loop);
     __ ldr(ip, MemOperand(r2, -kPointerSize));
@@ -1011,43 +1016,41 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     __ sub(r2, r2, Operand(kPointerSize));
     __ cmp(r2, sp);
     __ b(ne, &loop);
+    // Adjust the actual number of arguments and remove the top element
+    // (which is a copy of the last argument).
+    __ sub(r0, r0, Operand(1));
+    __ pop();
   }

-  // 5. Adjust the actual number of arguments and remove the top element.
-  // r0: actual number of arguments (including call() receiver)
-  // r1: function
-  __ sub(r0, r0, Operand(1));
-  __ add(sp, sp, Operand(kPointerSize));
-
-  // 6. Get the code for the function or the non-function builtin.
-  //    If number of expected arguments matches, then call. Otherwise restart
-  //    the arguments adaptor stub.
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
   // r0: actual number of arguments
   // r1: function
-  { Label invoke;
+  { Label function;
     __ tst(r1, r1);
-    __ b(ne, &invoke);
+    __ b(ne, &function);
     __ mov(r2, Operand(0));  // expected arguments is 0 for CALL_NON_FUNCTION
     __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
     __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
             RelocInfo::CODE_TARGET);
+    __ bind(&function);
+  }

-    __ bind(&invoke);
-    __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-    __ ldr(r2,
-           FieldMemOperand(r3,
-                           SharedFunctionInfo::kFormalParameterCountOffset));
-    __ ldr(r3,
-           MemOperand(r3, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
-    __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ cmp(r2, r0);  // Check formal and actual parameter counts.
-    __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
-            RelocInfo::CODE_TARGET, ne);
+  // 5b. Get the code to call from the function and check that the number of
+  //     expected arguments matches what we're providing. If so, jump
+  //     (tail-call) to the code in register edx without checking arguments.
+  // r0: actual number of arguments
+  // r1: function
+  __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r2,
+         FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+  __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ cmp(r2, r0);  // Check formal and actual parameter counts.
+  __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+          RelocInfo::CODE_TARGET, ne);

-    // 7. Jump to the code in r3 without checking arguments.
-    ParameterCount expected(0);
-    __ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
-  }
+  ParameterCount expected(0);
+  __ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
 }
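
(The trickiest step above is 4, the in-place shift. A rough C++ analogue, purely illustrative and not V8 code, of what the copy loop does to the stack slots:)

    #include <cstdint>

    // Hypothetical model: slots sp[0..argc] hold the arguments plus receiver.
    // Each slot takes the value of the slot below it, so the original first
    // argument ends up in the receiver slot; the caller then drops the
    // duplicated top slot and decrements the argument count.
    void ShiftArgumentsDown(intptr_t* sp, int argc) {
      for (int i = argc; i > 0; i--) {
        sp[i] = sp[i - 1];
      }
    }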

902
deps/v8/src/arm/codegen-arm.cc

File diff suppressed because it is too large

100
deps/v8/src/arm/codegen-arm.h

@@ -161,19 +161,15 @@ class CodeGenerator: public AstVisitor {
   // Takes a function literal, generates code for it. This function should only
   // be called by compiler.cc.
-  static Handle<Code> MakeCode(FunctionLiteral* fun,
-                               Handle<Script> script,
-                               bool is_eval,
-                               CompilationInfo* info);
+  static Handle<Code> MakeCode(CompilationInfo* info);

   // Printing of AST, etc. as requested by flags.
-  static void MakeCodePrologue(FunctionLiteral* fun);
+  static void MakeCodePrologue(CompilationInfo* info);

   // Allocate and install the code.
-  static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
-                                       MacroAssembler* masm,
+  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
                                        Code::Flags flags,
-                                       Handle<Script> script);
+                                       CompilationInfo* info);

 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
@@ -189,7 +185,7 @@ class CodeGenerator: public AstVisitor {
   // Accessors
   MacroAssembler* masm() { return masm_; }
   VirtualFrame* frame() const { return frame_; }
-  Handle<Script> script() { return script_; }
+  inline Handle<Script> script();

   bool has_valid_frame() const { return frame_ != NULL; }
@@ -212,16 +208,15 @@ class CodeGenerator: public AstVisitor {
  private:
   // Construction/Destruction
-  CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
+  explicit CodeGenerator(MacroAssembler* masm);

   // Accessors
-  Scope* scope() const { return scope_; }
+  inline bool is_eval();
+  Scope* scope();

   // Generating deferred code.
   void ProcessDeferred();

-  bool is_eval() { return is_eval_; }
-
   // State
   bool has_cc() const { return cc_reg_ != al; }
   JumpTarget* true_target() const { return state_->true_target(); }
@@ -249,7 +244,7 @@ class CodeGenerator: public AstVisitor {
   inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);

   // Main code generation function
-  void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
+  void Generate(CompilationInfo* info, Mode mode);

   // The following are used by class Reference.
   void LoadReference(Reference* ref);
@@ -403,6 +398,9 @@ class CodeGenerator: public AstVisitor {
   // Support for direct calls from JavaScript to native RegExp code.
   void GenerateRegExpExec(ZoneList<Expression*>* args);

+  // Fast support for number to string.
+  void GenerateNumberToString(ZoneList<Expression*>* args);
+
   // Simple condition analysis.
   enum ConditionAnalysis {
     ALWAYS_TRUE,
@@ -425,16 +423,14 @@ class CodeGenerator: public AstVisitor {
   bool HasValidEntryRegisters();
 #endif

-  bool is_eval_;  // Tells whether code is generated for eval.
-
-  Handle<Script> script_;
   List<DeferredCode*> deferred_;

   // Assembler
   MacroAssembler* masm_;  // to generate code

+  CompilationInfo* info_;
+
   // Code generation state
-  Scope* scope_;
   VirtualFrame* frame_;
   RegisterAllocator* allocator_;
   Condition cc_reg_;
@@ -538,6 +534,74 @@ class GenericBinaryOpStub : public CodeStub {
 };

+class StringStubBase: public CodeStub {
+ public:
+  // Generate code for copying characters using a simple loop. This should only
+  // be used in places where the number of characters is small and the
+  // additional setup and checking in GenerateCopyCharactersLong adds too much
+  // overhead. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  void GenerateCopyCharacters(MacroAssembler* masm,
+                              Register dest,
+                              Register src,
+                              Register count,
+                              Register scratch,
+                              bool ascii);
+
+  // Generate code for copying a large number of characters. This function
+  // is allowed to spend extra time setting up conditions to make copying
+  // faster. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  void GenerateCopyCharactersLong(MacroAssembler* masm,
+                                  Register dest,
+                                  Register src,
+                                  Register count,
+                                  Register scratch1,
+                                  Register scratch2,
+                                  Register scratch3,
+                                  Register scratch4,
+                                  Register scratch5,
+                                  int flags);
+};
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+  NO_STRING_ADD_FLAGS = 0,
+  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
+};
+
+
+class StringAddStub: public StringStubBase {
+ public:
+  explicit StringAddStub(StringAddFlags flags) {
+    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+  }
+
+ private:
+  Major MajorKey() { return StringAdd; }
+  int MinorKey() { return string_check_ ? 0 : 1; }
+
+  void Generate(MacroAssembler* masm);
+
+  // Should the stub check whether arguments are strings?
+  bool string_check_;
+};
+
+
+class SubStringStub: public StringStubBase {
+ public:
+  SubStringStub() {}
+
+ private:
+  Major MajorKey() { return SubString; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
 class StringCompareStub: public CodeStub {
  public:
   StringCompareStub() { }
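
(A rough C++ model, illustrative only, of the contract documented for GenerateCopyCharacters above: a plain forward copy that leaves the destination pointer one past the last character written, with no support for overlapping regions. The real stub emits ARM code through the MacroAssembler.)

    char* CopyCharacters(char* dest, const char* src, int count) {
      for (int i = 0; i < count; i++) {
        *dest++ = *src++;
      }
      return dest;  // ends after the last character written
    }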

10
deps/v8/src/arm/debug-arm.cc

@@ -128,7 +128,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   //  -- lr    : return address
   //  -- [sp]  : receiver
   // -----------------------------------
-  // Registers r0 and r2 contain objects that needs to be pushed on the
+  // Registers r0 and r2 contain objects that need to be pushed on the
   // expression stack of the fake JS frame.
   Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
 }
@@ -137,14 +137,14 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
 void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
   // Calling convention for IC store (from ic-arm.cc).
   // ----------- S t a t e -------------
-  //  -- r0    : receiver
+  //  -- r0    : value
+  //  -- r1    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
-  // Registers r0 and r2 contain objects that needs to be pushed on the
+  // Registers r0, r1, and r2 contain objects that need to be pushed on the
   // expression stack of the fake JS frame.
-  Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
+  Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit());
 }

25
deps/v8/src/arm/disasm-arm.cc

@@ -429,12 +429,22 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
       return 3;
     }
     case 'o': {
-      if (format[3] == '1') {
+      if ((format[3] == '1') && (format[4] == '2')) {
         // 'off12: 12-bit offset for load and store instructions
         ASSERT(STRING_STARTS_WITH(format, "off12"));
         out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                              "%d", instr->Offset12Field());
         return 5;
+      } else if ((format[3] == '1') && (format[4] == '6')) {
+        ASSERT(STRING_STARTS_WITH(format, "off16to20"));
+        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                             "%d", instr->Bits(20, 16) + 1);
+        return 9;
+      } else if (format[3] == '7') {
+        ASSERT(STRING_STARTS_WITH(format, "off7to11"));
+        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                             "%d", instr->ShiftAmountField());
+        return 8;
       }
       // 'off8: 8-bit offset for extra load and store instructions
       ASSERT(STRING_STARTS_WITH(format, "off8"));
@@ -795,7 +805,18 @@ void Decoder::DecodeType3(Instr* instr) {
       break;
     }
     case 3: {
-      Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+      if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
+        uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
+        uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
+        uint32_t msbit = widthminus1 + lsbit;
+        if (msbit <= 31) {
+          Format(instr, "ubfx'cond 'rd, 'rm, #'off7to11, #'off16to20");
+        } else {
+          UNREACHABLE();
+        }
+      } else {
+        Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+      }
       break;
     }
    default: {
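
(For context on the decode added above: the ARM ubfx encoding stores width - 1 in bits 20..16 and the least-significant bit position in bits 11..7, and the extracted field must lie inside the 32-bit register. A hypothetical sketch, not V8 code, mirroring the msbit <= 31 validity test:)

    #include <cstdint>

    bool UbfxFieldIsValid(uint32_t instr) {
      uint32_t widthminus1 = (instr >> 16) & 0x1F;  // bits 20..16
      uint32_t lsbit = (instr >> 7) & 0x1F;         // bits 11..7
      return widthminus1 + lsbit <= 31;             // msbit stays in the word
    }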

188
deps/v8/src/arm/fast-codegen-arm.cc

@@ -35,78 +35,142 @@ namespace internal {

 #define __ ACCESS_MASM(masm())

-void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+Register FastCodeGenerator::accumulator0() { return r0; }
+Register FastCodeGenerator::accumulator1() { return r1; }
+Register FastCodeGenerator::scratch0() { return r3; }
+Register FastCodeGenerator::scratch1() { return r4; }
+Register FastCodeGenerator::receiver_reg() { return r2; }
+Register FastCodeGenerator::context_reg() { return cp; }
+
+
+void FastCodeGenerator::EmitLoadReceiver() {
   // Offset 2 is due to return address and saved frame pointer.
-  int index = 2 + function()->scope()->num_parameters();
-  __ ldr(reg, MemOperand(sp, index * kPointerSize));
+  int index = 2 + scope()->num_parameters();
+  __ ldr(receiver_reg(), MemOperand(sp, index * kPointerSize));
 }

-void FastCodeGenerator::EmitReceiverMapCheck() {
-  Comment cmnt(masm(), ";; MapCheck(this)");
-  if (FLAG_print_ir) {
-    PrintF("MapCheck(this)\n");
-  }
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
+  ASSERT(!destination().is(no_reg));
+  ASSERT(cell->IsJSGlobalPropertyCell());

-  EmitLoadReceiver(r1);
-  __ BranchOnSmi(r1, bailout());
+  __ mov(destination(), Operand(cell));
+  __ ldr(destination(),
+         FieldMemOperand(destination(), JSGlobalPropertyCell::kValueOffset));
+  if (FLAG_debug_code) {
+    __ mov(ip, Operand(Factory::the_hole_value()));
+    __ cmp(destination(), ip);
+    __ Check(ne, "DontDelete cells can't contain the hole");
+  }

-  ASSERT(has_receiver() && receiver()->IsHeapObject());
-  Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
-  Handle<Map> map(object->map());
-  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ mov(ip, Operand(map));
-  __ cmp(r3, ip);
-  __ b(ne, bailout());
+  // The loaded value is not known to be a smi.
+  clear_as_smi(destination());
 }

-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
-  // Compile global variable accesses as load IC calls. The only live
-  // registers are cp (context) and possibly r1 (this). Both are also saved
-  // in the stack and cp is preserved by the call.
-  __ ldr(ip, CodeGenerator::GlobalObject());
-  __ push(ip);
-  __ mov(r2, Operand(name));
-  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-  if (has_this_properties()) {
-    // Restore this.
-    EmitLoadReceiver(r1);
+void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+  LookupResult lookup;
+  info()->receiver()->Lookup(*name, &lookup);
+
+  ASSERT(lookup.holder() == *info()->receiver());
+  ASSERT(lookup.type() == FIELD);
+  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
+  int index = lookup.GetFieldIndex() - map->inobject_properties();
+  int offset = index * kPointerSize;
+
+  // We will emit the write barrier unless the stored value is statically
+  // known to be a smi.
+  bool needs_write_barrier = !is_smi(accumulator0());
+
+  // Negative offsets are inobject properties.
+  if (offset < 0) {
+    offset += map->instance_size();
+    __ str(accumulator0(), FieldMemOperand(receiver_reg(), offset));
+    if (needs_write_barrier) {
+      // Preserve receiver from write barrier.
+      __ mov(scratch0(), receiver_reg());
+    }
+  } else {
+    offset += FixedArray::kHeaderSize;
+    __ ldr(scratch0(),
+           FieldMemOperand(receiver_reg(), JSObject::kPropertiesOffset));
+    __ str(accumulator0(), FieldMemOperand(scratch0(), offset));
+  }
+
+  if (needs_write_barrier) {
+    __ mov(scratch1(), Operand(offset));
+    __ RecordWrite(scratch0(), scratch1(), ip);
+  }
+
+  if (destination().is(accumulator1())) {
+    __ mov(accumulator1(), accumulator0());
+    if (is_smi(accumulator0())) {
+      set_as_smi(accumulator1());
+    } else {
+      clear_as_smi(accumulator1());
+    }
   }
 }

-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
+  ASSERT(!destination().is(no_reg));
   LookupResult lookup;
-  receiver()->Lookup(*name, &lookup);
+  info()->receiver()->Lookup(*name, &lookup);

-  ASSERT(lookup.holder() == *receiver());
+  ASSERT(lookup.holder() == *info()->receiver());
   ASSERT(lookup.type() == FIELD);
-  Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
   int index = lookup.GetFieldIndex() - map->inobject_properties();
   int offset = index * kPointerSize;

-  // Negative offsets are inobject properties.
+  // Perform the load. Negative offsets are inobject properties.
   if (offset < 0) {
     offset += map->instance_size();
-    __ mov(r2, r1);  // Copy receiver for write barrier.
+    __ ldr(destination(), FieldMemOperand(receiver_reg(), offset));
   } else {
     offset += FixedArray::kHeaderSize;
-    __ ldr(r2, FieldMemOperand(r1, JSObject::kPropertiesOffset));
+    __ ldr(scratch0(),
+           FieldMemOperand(receiver_reg(), JSObject::kPropertiesOffset));
+    __ ldr(destination(), FieldMemOperand(scratch0(), offset));
+  }
+
+  // The loaded value is not known to be a smi.
+  clear_as_smi(destination());
+}
+
+
+void FastCodeGenerator::EmitBitOr() {
+  if (is_smi(accumulator0()) && is_smi(accumulator1())) {
+    // If both operands are known to be a smi then there is no need to check
+    // the operands or result. There is no need to perform the operation in
+    // an effect context.
+    if (!destination().is(no_reg)) {
+      __ orr(destination(), accumulator1(), Operand(accumulator0()));
+    }
+  } else if (destination().is(no_reg)) {
+    // Result is not needed but do not clobber the operands in case of
+    // bailout.
+    __ orr(scratch0(), accumulator1(), Operand(accumulator0()));
+    __ BranchOnNotSmi(scratch0(), bailout());
+  } else {
+    // Preserve the destination operand in a scratch register in case of
+    // bailout.
+    __ mov(scratch0(), destination());
+    __ orr(destination(), accumulator1(), Operand(accumulator0()));
+    __ BranchOnNotSmi(destination(), bailout());
   }

-  // Perform the store.
-  __ str(r0, FieldMemOperand(r2, offset));
-  __ mov(r3, Operand(offset));
-  __ RecordWrite(r2, r3, ip);
+  // If we didn't bailout, the result (in fact, both inputs too) is known to
+  // be a smi.
+  set_as_smi(accumulator0());
+  set_as_smi(accumulator1());
 }
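
(Why EmitBitOr can skip the result check when both inputs are known smis: a smi in this era's V8 is an integer shifted left by the one-bit tag size with a 0 tag bit, and bitwise OR cannot set a bit that is clear in both operands. A rough illustration, not V8 code:)

    #include <cstdint>

    int32_t SmiTag(int32_t value) { return value << 1; }  // tag bit = 0
    int32_t SmiOr(int32_t a_tagged, int32_t b_tagged) {
      return a_tagged | b_tagged;  // tag bit stays 0: still a valid smi
    }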
-void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
-  ASSERT(function_ == NULL);
+void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
   ASSERT(info_ == NULL);
-  function_ = fun;
-  info_ = info;
+  info_ = compilation_info;

   // Save the caller's frame pointer and set up our own.
   Comment prologue_cmnt(masm(), ";; Prologue");
@@ -115,18 +179,42 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
   // Note that we keep a live register reference to cp (context) at
   // this point.

-  // Receiver (this) is allocated to r1 if there are this properties.
-  if (has_this_properties()) EmitReceiverMapCheck();
+  // Receiver (this) is allocated to a fixed register.
+  if (info()->has_this_properties()) {
+    Comment cmnt(masm(), ";; MapCheck(this)");
+    if (FLAG_print_ir) {
+      PrintF("MapCheck(this)\n");
+    }
+    ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
+    Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
+    Handle<Map> map(object->map());
+    EmitLoadReceiver();
+    __ CheckMap(receiver_reg(), scratch0(), map, bailout(), false);
+  }

-  VisitStatements(fun->body());
+  // If there is a global variable access check if the global object is the
+  // same as at lazy-compilation time.
+  if (info()->has_globals()) {
+    Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
+    if (FLAG_print_ir) {
+      PrintF("MapCheck(GLOBAL)\n");
+    }
+    ASSERT(info()->has_global_object());
+    Handle<Map> map(info()->global_object()->map());
+    __ ldr(scratch0(), CodeGenerator::GlobalObject());
+    __ CheckMap(scratch0(), scratch1(), map, bailout(), true);
+  }
+
+  VisitStatements(function()->body());

   Comment return_cmnt(masm(), ";; Return(<undefined>)");
+  if (FLAG_print_ir) {
+    PrintF("Return(<undefined>)\n");
+  }
   __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);

+  Comment epilogue_cmnt(masm(), ";; Epilogue");
   __ mov(sp, fp);
   __ ldm(ia_w, sp, fp.bit() | lr.bit());
-  int32_t sp_delta = (fun->scope()->num_parameters() + 1) * kPointerSize;
+  int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
   __ add(sp, sp, Operand(sp_delta));
   __ Jump(lr);

182
deps/v8/src/arm/full-codegen-arm.cc

@@ -52,12 +52,13 @@ namespace internal {
 //
 // The function builds a JS frame. Please see JavaScriptFrameConstants in
 // frames-arm.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
-  function_ = fun;
-  SetFunctionPosition(fun);
+void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+  ASSERT(info_ == NULL);
+  info_ = info;
+  SetFunctionPosition(function());

   if (mode == PRIMARY) {
-    int locals_count = fun->scope()->num_stack_slots();
+    int locals_count = scope()->num_stack_slots();

     __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
     if (locals_count > 0) {
@@ -77,7 +78,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
     bool function_in_register = true;

     // Possibly allocate a local context.
-    if (fun->scope()->num_heap_slots() > 0) {
+    if (scope()->num_heap_slots() > 0) {
       Comment cmnt(masm_, "[ Allocate local context");
       // Argument to NewContext is the function, which is in r1.
       __ push(r1);
@@ -87,9 +88,9 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
       // passed to us. It's saved in the stack and kept live in cp.
       __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       // Copy any necessary parameters into the context.
-      int num_parameters = fun->scope()->num_parameters();
+      int num_parameters = scope()->num_parameters();
       for (int i = 0; i < num_parameters; i++) {
-        Slot* slot = fun->scope()->parameter(i)->slot();
+        Slot* slot = scope()->parameter(i)->slot();
         if (slot != NULL && slot->type() == Slot::CONTEXT) {
           int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                                  (num_parameters - 1 - i) * kPointerSize;
@@ -107,7 +108,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
       }
     }

-    Variable* arguments = fun->scope()->arguments()->AsVariable();
+    Variable* arguments = scope()->arguments()->AsVariable();
     if (arguments != NULL) {
       // Function uses arguments object.
       Comment cmnt(masm_, "[ Allocate arguments object");
@@ -118,9 +119,10 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
         __ mov(r3, r1);
       }
       // Receiver is just before the parameters on the caller's stack.
-      __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
-                             fun->num_parameters() * kPointerSize));
-      __ mov(r1, Operand(Smi::FromInt(fun->num_parameters())));
+      int offset = scope()->num_parameters() * kPointerSize;
+      __ add(r2, fp,
+             Operand(StandardFrameConstants::kCallerSPOffset + offset));
+      __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
       __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());

       // Arguments to ArgumentsAccessStub:
@@ -133,7 +135,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
       __ mov(r3, r0);
       Move(arguments->slot(), r0, r1, r2);
       Slot* dot_arguments_slot =
-          fun->scope()->arguments_shadow()->AsVariable()->slot();
+          scope()->arguments_shadow()->AsVariable()->slot();
       Move(dot_arguments_slot, r3, r1, r2);
     }
   }
@@ -155,7 +157,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
   }

   { Comment cmnt(masm_, "[ Declarations");
-    VisitDeclarations(fun->scope()->declarations());
+    VisitDeclarations(scope()->declarations());
   }

   if (FLAG_trace) {
@@ -164,7 +166,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {

   { Comment cmnt(masm_, "[ Body");
     ASSERT(loop_depth() == 0);
-    VisitStatements(fun->body());
+    VisitStatements(function()->body());
     ASSERT(loop_depth() == 0);
   }
@@ -173,7 +175,7 @@ void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
     // body.
     __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
   }
-  EmitReturnSequence(function_->end_position());
+  EmitReturnSequence(function()->end_position());
 }
@@ -196,7 +198,7 @@ void FullCodeGenerator::EmitReturnSequence(int position) {
     // Calculate the exact length of the return sequence and make sure that
     // the constant pool is not emitted inside of the return sequence.
-    int num_parameters = function_->scope()->num_parameters();
+    int num_parameters = scope()->num_parameters();
     int32_t sp_delta = (num_parameters + 1) * kPointerSize;
     int return_sequence_length = Assembler::kJSReturnSequenceLength;
     if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
@@ -512,7 +514,7 @@ MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
       return MemOperand(fp, SlotOffset(slot));
     case Slot::CONTEXT: {
       int context_chain_length =
-          function_->scope()->ContextChainLength(slot->var()->scope());
+          scope()->ContextChainLength(slot->var()->scope());
       __ LoadContext(scratch, context_chain_length);
       return CodeGenerator::ContextOperand(scratch, slot->index());
     }
@@ -572,7 +574,7 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
         // this specific context.

         // The variable in the decl always resides in the current context.
-        ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+        ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
         if (FLAG_debug_code) {
           // Check if we have the correct context pointer.
           __ ldr(r1,
@@ -652,7 +654,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
   __ mov(r1, Operand(pairs));
-  __ mov(r0, Operand(Smi::FromInt(is_eval_ ? 1 : 0)));
+  __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
   __ stm(db_w, sp, cp.bit() | r1.bit() | r0.bit());
   __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
@@ -664,7 +666,7 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {

   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script_, this);
+      Compiler::BuildBoilerplate(expr, script(), this);
   if (HasStackOverflow()) return;

   ASSERT(boilerplate->IsBoilerplate());
@@ -814,9 +816,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
         if (key->handle()->IsSymbol()) {
           VisitForValue(value, kAccumulator);
           __ mov(r2, Operand(key->handle()));
+          __ ldr(r1, MemOperand(sp));
           Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
           __ Call(ic, RelocInfo::CODE_TARGET);
-          // StoreIC leaves the receiver on the stack.
           break;
         }
         // Fall through.
@@ -905,6 +907,92 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
 }

+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+  Comment cmnt(masm_, "[ Assignment");
+  ASSERT(expr->op() != Token::INIT_CONST);
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->target()->AsProperty();
+  if (prop != NULL) {
+    assign_type =
+        (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+  }
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      if (expr->is_compound()) {
+        // We need the receiver both on the stack and in the accumulator.
+        VisitForValue(prop->obj(), kAccumulator);
+        __ push(result_register());
+      } else {
+        VisitForValue(prop->obj(), kStack);
+      }
+      break;
+    case KEYED_PROPERTY:
+      VisitForValue(prop->obj(), kStack);
+      VisitForValue(prop->key(), kStack);
+      break;
+  }
+
+  // If we have a compound assignment: Get value of LHS expression and
+  // store in on top of the stack.
+  if (expr->is_compound()) {
+    Location saved_location = location_;
+    location_ = kStack;
+    switch (assign_type) {
+      case VARIABLE:
+        EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
+                         Expression::kValue);
+        break;
+      case NAMED_PROPERTY:
+        EmitNamedPropertyLoad(prop);
+        __ push(result_register());
+        break;
+      case KEYED_PROPERTY:
+        EmitKeyedPropertyLoad(prop);
+        __ push(result_register());
+        break;
+    }
+    location_ = saved_location;
+  }
+
+  // Evaluate RHS expression.
+  Expression* rhs = expr->value();
+  VisitForValue(rhs, kAccumulator);
+
+  // If we have a compound assignment: Apply operator.
+  if (expr->is_compound()) {
+    Location saved_location = location_;
+    location_ = kAccumulator;
+    EmitBinaryOp(expr->binary_op(), Expression::kValue);
+    location_ = saved_location;
+  }
+
+  // Record source position before possible IC call.
+  SetSourcePosition(expr->position());
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE:
+      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             context_);
+      break;
+    case NAMED_PROPERTY:
+      EmitNamedPropertyAssignment(expr);
+      break;
+    case KEYED_PROPERTY:
+      EmitKeyedPropertyAssignment(expr);
+      break;
+  }
+}
+
+
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
@@ -943,21 +1031,17 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
     ASSERT(!var->is_this());
     // Assignment to a global variable. Use inline caching for the
     // assignment. Right-hand-side value is passed in r0, variable name in
-    // r2, and the global object on the stack.
+    // r2, and the global object in r1.
     __ mov(r2, Operand(var->name()));
-    __ ldr(ip, CodeGenerator::GlobalObject());
-    __ push(ip);
+    __ ldr(r1, CodeGenerator::GlobalObject());
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
-    // Overwrite the global object on the stack with the result if needed.
-    DropAndApply(1, context, r0);

   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
     __ push(result_register());  // Value.
     __ mov(r1, Operand(var->name()));
     __ stm(db_w, sp, cp.bit() | r1.bit());  // Context and name.
     __ CallRuntime(Runtime::kStoreContextSlot, 3);
-    Apply(context, r0);

   } else if (var->slot() != NULL) {
     Slot* slot = var->slot();
@@ -984,13 +1068,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
         UNREACHABLE();
         break;
     }
-    Apply(context, result_register());

   } else {
     // Variables rewritten as properties are not treated as variables in
     // assignments.
     UNREACHABLE();
   }
+  Apply(context, result_register());
 }
@@ -1014,6 +1098,12 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+  if (expr->ends_initialization_block()) {
+    __ ldr(r1, MemOperand(sp));
+  } else {
+    __ pop(r1);
+  }
+
   Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
   __ Call(ic, RelocInfo::CODE_TARGET);
@@ -1024,9 +1114,10 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
     __ push(ip);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(r0);
+    DropAndApply(1, context_, r0);
+  } else {
+    Apply(context_, r0);
   }
-
-  DropAndApply(1, context_, r0);
 }
@@ -1085,7 +1176,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
 }

 void FullCodeGenerator::EmitCallWithIC(Call* expr,
-                                       Handle<Object> ignored,
+                                       Handle<Object> name,
                                        RelocInfo::Mode mode) {
   // Code common for calls using the IC.
   ZoneList<Expression*>* args = expr->arguments();
@@ -1093,16 +1184,16 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
   for (int i = 0; i < arg_count; i++) {
     VisitForValue(args->at(i), kStack);
   }
+  __ mov(r2, Operand(name));
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   // Call the IC initialization code.
-  Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
-                                                         NOT_IN_LOOP);
+  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+  Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
   __ Call(ic, mode);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  // Discard the function left on TOS.
-  DropAndApply(1, context_, r0);
+  Apply(context_, r0);
 }
@@ -1119,7 +1210,6 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
   __ CallStub(&stub);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  // Discard the function left on TOS.
   DropAndApply(1, context_, r0);
 }
@@ -1133,11 +1223,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
     // Call to the identifier 'eval'.
     UNREACHABLE();
   } else if (var != NULL && !var->is_this() && var->is_global()) {
-    // Call to a global variable.
-    __ mov(r1, Operand(var->name()));
-    // Push global object as receiver for the call IC lookup.
+    // Push global object as receiver for the call IC.
     __ ldr(r0, CodeGenerator::GlobalObject());
-    __ stm(db_w, sp, r1.bit() | r0.bit());
+    __ push(r0);
     EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
   } else if (var != NULL && var->slot() != NULL &&
              var->slot()->type() == Slot::LOOKUP) {
@@ -1149,8 +1237,6 @@ void FullCodeGenerator::VisitCall(Call* expr) {
     Literal* key = prop->key()->AsLiteral();
     if (key != NULL && key->handle()->IsSymbol()) {
       // Call to a named property, use call IC.
-      __ mov(r0, Operand(key->handle()));
-      __ push(r0);
       VisitForValue(prop->obj(), kStack);
       EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
     } else {
@@ -1236,10 +1322,9 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {

   if (expr->is_jsruntime()) {
     // Prepare for calling JS runtime function.
-    __ mov(r1, Operand(expr->name()));
     __ ldr(r0, CodeGenerator::GlobalObject());
     __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
-    __ stm(db_w, sp, r1.bit() | r0.bit());
+    __ push(r0);
   }

   // Push the arguments ("left-to-right").
@@ -1250,18 +1335,17 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {

   if (expr->is_jsruntime()) {
     // Call the JS runtime function.
+    __ mov(r2, Operand(expr->name()));
     Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
                                                            NOT_IN_LOOP);
     __ Call(ic, RelocInfo::CODE_TARGET);
     // Restore context register.
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-    // Discard the function left on TOS.
-    DropAndApply(1, context_, r0);
   } else {
     // Call the C runtime function.
     __ CallRuntime(expr->function(), arg_count);
-    Apply(context_, r0);
   }
+  Apply(context_, r0);
 }
@@ -1546,15 +1630,15 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
       break;
     case NAMED_PROPERTY: {
       __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+      __ pop(r1);
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
       __ Call(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
-        __ Drop(1);  // Result is on the stack under the receiver.
         if (context_ != Expression::kEffect) {
           ApplyTOS(context_);
         }
       } else {
-        DropAndApply(1, context_, r0);
+        Apply(context_, r0);
       }
       break;
     }

214
deps/v8/src/arm/ic-arm.cc

@ -59,7 +59,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// r3 - used as temporary and to hold the capacity of the property // r3 - used as temporary and to hold the capacity of the property
// dictionary. // dictionary.
// //
// r2 - holds the name of the property and is unchanges. // r2 - holds the name of the property and is unchanged.
Label done; Label done;
@ -190,7 +190,7 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
__ ldr(r0, MemOperand(sp, 0)); __ ldr(r0, MemOperand(sp, 0));
StubCompiler::GenerateLoadStringLength2(masm, r0, r1, r3, &miss); StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss);
// Cache miss: Jump to runtime. // Cache miss: Jump to runtime.
__ bind(&miss); __ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@ -219,14 +219,13 @@ Object* CallIC_Miss(Arguments args);
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- lr: return address // -- r2 : name
// -- lr : return address
// ----------------------------------- // -----------------------------------
Label number, non_number, non_string, boolean, probe, miss; Label number, non_number, non_string, boolean, probe, miss;
// Get the receiver of the function from the stack into r1. // Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize)); __ ldr(r1, MemOperand(sp, argc * kPointerSize));
// Get the name of the function from the stack; 1 ~ receiver.
__ ldr(r2, MemOperand(sp, (argc + 1) * kPointerSize));
// Probe the stub cache. // Probe the stub cache.
Code::Flags flags = Code::Flags flags =
@ -301,9 +300,9 @@ static void GenerateNormalHelper(MacroAssembler* masm,
// Patch the receiver with the global proxy if necessary. // Patch the receiver with the global proxy if necessary.
if (is_global_object) { if (is_global_object) {
__ ldr(r2, MemOperand(sp, argc * kPointerSize)); __ ldr(r0, MemOperand(sp, argc * kPointerSize));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset)); __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
__ str(r2, MemOperand(sp, argc * kPointerSize)); __ str(r0, MemOperand(sp, argc * kPointerSize));
} }
// Invoke the function. // Invoke the function.
@ -314,14 +313,13 @@ static void GenerateNormalHelper(MacroAssembler* masm,
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) { void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- lr: return address // -- r2 : name
// -- lr : return address
// ----------------------------------- // -----------------------------------
Label miss, global_object, non_global_object; Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack into r1. // Get the receiver of the function from the stack into r1.
__ ldr(r1, MemOperand(sp, argc * kPointerSize)); __ ldr(r1, MemOperand(sp, argc * kPointerSize));
// Get the name of the function from the stack; 1 ~ receiver.
__ ldr(r2, MemOperand(sp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
__ tst(r1, Operand(kSmiTagMask)); __ tst(r1, Operand(kSmiTagMask));
@@ -374,18 +372,17 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
 void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
-  // -- lr: return address
+  // -- r2    : name
+  // -- lr    : return address
   // -----------------------------------

   // Get the receiver of the function from the stack.
-  __ ldr(r2, MemOperand(sp, argc * kPointerSize));
-  // Get the name of the function to call from the stack.
-  __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
+  __ ldr(r3, MemOperand(sp, argc * kPointerSize));

   __ EnterInternalFrame();

   // Push the receiver and the name of the function.
-  __ stm(db_w, sp, r1.bit() | r2.bit());
+  __ stm(db_w, sp, r2.bit() | r3.bit());

   // Call the entry.
   __ mov(r0, Operand(2));

@@ -438,7 +435,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   StubCache::GenerateProbe(masm, flags, r0, r2, r3, no_reg);

   // Cache miss: Jump to runtime.
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+  GenerateMiss(masm);
 }

@@ -482,16 +479,11 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
   // Cache miss: Restore receiver from stack and jump to runtime.
   __ bind(&miss);
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+  GenerateMiss(masm);
 }


 void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-
-void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
   // ----------- S t a t e -------------
   // -- r2    : name
   // -- lr    : return address

@@ -502,7 +494,7 @@ void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
   __ stm(db_w, sp, r2.bit() | r3.bit());

   // Perform tail call to the entry.
-  __ TailCallRuntime(f, 2, 1);
+  __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
 }

@@ -530,11 +522,20 @@ Object* KeyedLoadIC_Miss(Arguments args);
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+  // ---------- S t a t e --------------
+  // -- lr     : return address
+  // -- sp[0]  : key
+  // -- sp[4]  : receiver
+  // -----------------------------------
+
+  __ ldm(ia, sp, r2.bit() | r3.bit());
+  __ stm(db_w, sp, r2.bit() | r3.bit());
+
+  __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
 }


-void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   // -- lr     : return address
   // -- sp[0]  : key

@@ -544,7 +545,7 @@ void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
   __ ldm(ia, sp, r2.bit() | r3.bit());
   __ stm(db_w, sp, r2.bit() | r3.bit());

-  __ TailCallRuntime(f, 2, 1);
+  __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2, 1);
 }

@@ -558,17 +559,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // Get the key and receiver object from the stack.
   __ ldm(ia, sp, r0.bit() | r1.bit());

-  // Check that the key is a smi.
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(ne, &slow);
-  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-
   // Check that the object isn't a smi.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &slow);
+  __ BranchOnSmi(r1, &slow);

   // Get the map of the receiver.
   __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
   // Check bit field.
   __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
   __ tst(r3, Operand(kSlowCaseBitFieldMask));

@@ -582,6 +577,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ cmp(r2, Operand(JS_OBJECT_TYPE));
   __ b(lt, &slow);

+  // Check that the key is a smi.
+  __ BranchOnNotSmi(r0, &slow);
+  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+
   // Get the elements array of the object.
   __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).

@@ -597,10 +596,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
-  // Slow case: Push extra copies of the arguments (2).
+  // Slow case:
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
-  __ ldm(ia, sp, r0.bit() | r1.bit());
-  __ stm(db_w, sp, r0.bit() | r1.bit());
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2, 1);
+  GenerateRuntimeGetProperty(masm);

   // Fast case: Do the load.
   __ bind(&fast);

@@ -634,8 +630,47 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
 }


-void KeyedStoreIC::Generate(MacroAssembler* masm,
-                            const ExternalReference& f) {
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  // -- lr     : return address
+  // -- sp[0]  : key
+  // -- sp[4]  : receiver
+  // -----------------------------------
+  Label slow;
+
+  // Get the key and receiver object from the stack.
+  __ ldm(ia, sp, r0.bit() | r1.bit());
+
+  // Check that the receiver isn't a smi.
+  __ BranchOnSmi(r1, &slow);
+
+  // Check that the key is a smi.
+  __ BranchOnNotSmi(r0, &slow);
+
+  // Get the map of the receiver.
+  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+
+  // Check that it has indexed interceptor and access checks
+  // are not enabled for this object.
+  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+  __ and_(r3, r3, Operand(kSlowCaseBitFieldMask));
+  __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor));
+  __ b(ne, &slow);
+
+  // Everything is fine, call runtime.
+  __ push(r1);  // receiver
+  __ push(r0);  // key
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(ExternalReference(
+      IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
+
+  __ bind(&slow);
+  GenerateMiss(masm);
+}
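The mask-then-compare pair above checks two map bits at once: after the `and_` with kSlowCaseBitFieldMask, equality with `1 << Map::kHasIndexedInterceptor` holds only if the interceptor bit is set and every other masked bit (such as the access-check bit) is clear. A sketch with assumed bit positions, not V8's actual ones:

    #include <cassert>

    const int kHasIndexedInterceptor = 1;      // assumed bit positions
    const int kIsAccessCheckNeeded = 2;
    const int kSlowCaseBitFieldMask =
        (1 << kHasIndexedInterceptor) | (1 << kIsAccessCheckNeeded);

    bool OnlyIndexedInterceptor(int bit_field) {
      // Mirrors "and_ r3, r3, #mask; cmp r3, #(1 << kHasIndexedInterceptor)".
      return (bit_field & kSlowCaseBitFieldMask) ==
             (1 << kHasIndexedInterceptor);
    }

    int main() {
      assert(OnlyIndexedInterceptor(1 << kHasIndexedInterceptor));
      assert(!OnlyIndexedInterceptor((1 << kHasIndexedInterceptor) |
                                     (1 << kIsAccessCheckNeeded)));
      assert(!OnlyIndexedInterceptor(0));
      return 0;
    }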
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   // -- r0     : value
   // -- lr     : return address

@@ -646,7 +681,21 @@ void KeyedStoreIC::Generate(MacroAssembler* masm,
   __ ldm(ia, sp, r2.bit() | r3.bit());
   __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());

-  __ TailCallRuntime(f, 3, 1);
+  __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
+}
+
+
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
+  // ---------- S t a t e --------------
+  // -- r0     : value
+  // -- lr     : return address
+  // -- sp[0]  : key
+  // -- sp[1]  : receiver
+  // -----------------------------------
+
+  __ ldm(ia, sp, r1.bit() | r3.bit());  // r0 == value, r1 == key, r3 == object
+  __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
+
+  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
 }
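The `r?.bit()` expressions build a register-list bitmask for ldm/stm; ARM transfers the listed registers in ascending register number, pairing the lowest register with the lowest address, so the order the bits are OR'ed in is irrelevant. A small simulation of "stm db_w" under that rule (a sketch, not V8's simulator):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Simulate "stm db_w, sp, mask": store-multiple, decrement-before, writeback.
    uint32_t StmDbW(std::vector<uint32_t>& stack, uint32_t sp,
                    uint32_t mask, const uint32_t regs[16]) {
      int count = 0;
      for (int r = 0; r < 16; r++) if (mask & (1u << r)) count++;
      sp -= 4 * count;                       // decrement before
      uint32_t addr = sp;
      for (int r = 0; r < 16; r++) {         // ascending register number
        if (mask & (1u << r)) {
          stack[addr / 4] = regs[r];         // lowest reg -> lowest address
          addr += 4;
        }
      }
      return sp;                             // written-back sp
    }

    int main() {
      std::vector<uint32_t> stack(64, 0);
      uint32_t regs[16] = {0};
      regs[0] = 0xAA; regs[1] = 0xBB; regs[3] = 0xCC;  // r0=value r1=key r3=object
      uint32_t sp = StmDbW(stack, 256, (1u << 0) | (1u << 1) | (1u << 3), regs);
      // sp[0]=r0(value), sp[4]=r1(key), sp[8]=r3(object), matching the comment.
      printf("%x %x %x\n", stack[sp / 4], stack[sp / 4 + 1], stack[sp / 4 + 2]);
      return 0;
    }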
@@ -701,12 +750,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ b(lo, &fast);

-  // Slow case: Push extra copies of the arguments (3).
+  // Slow case:
   __ bind(&slow);
-  __ ldm(ia, sp, r1.bit() | r3.bit());  // r0 == value, r1 == key, r3 == object
-  __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+  GenerateRuntimeSetProperty(masm);

   // Extra capacity case: Check if there is extra capacity to
   // perform the store and update the length. Used for adding one

@@ -777,33 +823,15 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
 }


-void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  // -- r0     : value
-  // -- lr     : return address
-  // -- sp[0]  : key
-  // -- sp[1]  : receiver
-  // ----------- S t a t e -------------
-
-  __ ldm(ia, sp, r2.bit() | r3.bit());
-  __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(
-      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
-}
-
-
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   // -- r0    : value
+  // -- r1    : receiver
   // -- r2    : name
   // -- lr    : return address
-  // -- [sp]  : receiver
   // -----------------------------------

   // Get the receiver from the stack and probe the stub cache.
-  __ ldr(r1, MemOperand(sp));
   Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
                                          NOT_IN_LOOP,
                                          MONOMORPHIC);

@@ -814,36 +842,66 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
 }


-void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   // -- r0    : value
+  // -- r1    : receiver
   // -- r2    : name
   // -- lr    : return address
-  // -- [sp]  : receiver
   // -----------------------------------

-  __ ldr(r3, MemOperand(sp));  // copy receiver
-  __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+  __ push(r1);
+  __ stm(db_w, sp, r2.bit() | r0.bit());

   // Perform tail call to the entry.
-  __ TailCallRuntime(
-      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+  __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
 }


-void StoreIC::GenerateMiss(MacroAssembler* masm) {
+void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   // -- r0    : value
+  // -- r1    : receiver
   // -- r2    : name
   // -- lr    : return address
-  // -- [sp]  : receiver
   // -----------------------------------
+  //
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except for external and pixel arrays which means
+  // anything with elements of FixedArray type.), but currently is restricted
+  // to JSArray.
+  // Value must be a number, but only smis are accepted as the most common case.

-  __ ldr(r3, MemOperand(sp));  // copy receiver
-  __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+  Label miss;

-  // Perform tail call to the entry.
-  __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
+  Register receiver = r1;
+  Register value = r0;
+  Register scratch = r3;
+
+  // Check that the receiver isn't a smi.
+  __ BranchOnSmi(receiver, &miss);
+
+  // Check that the object is a JS array.
+  __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
+  __ b(ne, &miss);
+
+  // Check that elements are FixedArray.
+  __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+  __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
+  __ b(ne, &miss);
+
+  // Check that value is a smi.
+  __ BranchOnNotSmi(value, &miss);
+
+  // Prepare tail call to StoreIC_ArrayLength.
+  __ push(receiver);
+  __ push(value);
+
+  __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_ArrayLength)), 2, 1);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
 }

264  deps/v8/src/arm/macro-assembler-arm.cc

@@ -37,7 +37,6 @@ namespace internal {
 MacroAssembler::MacroAssembler(void* buffer, int size)
     : Assembler(buffer, size),
-      unresolved_(0),
       generating_stub_(false),
       allow_stub_calls_(true),
       code_object_(Heap::undefined_value()) {

@@ -196,7 +195,7 @@ void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
 void MacroAssembler::LoadRoot(Register destination,
                               Heap::RootListIndex index,
                               Condition cond) {
-  ldr(destination, MemOperand(r10, index << kPointerSizeLog2), cond);
+  ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
 }
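LoadRoot now indexes off the dedicated roots register (aliased to r10 in macro-assembler-arm.h below) rather than a bare r10. The offset arithmetic is plain array indexing with 4-byte pointers (kPointerSizeLog2 == 2); a sketch:

    #include <cassert>
    #include <cstdint>

    const int kPointerSizeLog2 = 2;  // 32-bit ARM: pointers are 4 bytes

    int main() {
      uint32_t heap_roots[8] = {0};
      heap_roots[5] = 0xDEADBEEF;                  // pretend root #5
      uint8_t* roots_base = reinterpret_cast<uint8_t*>(heap_roots);
      int index = 5;
      // Mirrors "ldr dst, [roots, index << kPointerSizeLog2]".
      uint32_t value = *reinterpret_cast<uint32_t*>(
          roots_base + (index << kPointerSizeLog2));
      assert(value == 0xDEADBEEF);
      return 0;
    }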
@@ -331,14 +330,10 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
   // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
   stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
-  mov(fp, Operand(sp));  // setup new frame pointer
+  mov(fp, Operand(sp));  // Setup new frame pointer.

-  if (mode == ExitFrame::MODE_DEBUG) {
-    mov(ip, Operand(Smi::FromInt(0)));
-  } else {
-    mov(ip, Operand(CodeObject()));
-  }
-  push(ip);
+  mov(ip, Operand(CodeObject()));
+  push(ip);  // Accessed from ExitFrame::code_slot.

   // Save the frame pointer and the context in top.
   mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));

@@ -550,6 +545,21 @@ void MacroAssembler::InvokeFunction(Register fun,
 }


+void MacroAssembler::InvokeFunction(JSFunction* function,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+  ASSERT(function->is_compiled());
+
+  // Get the function and setup the context.
+  mov(r1, Operand(Handle<JSFunction>(function)));
+  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+  // Invoke the cached code.
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+}
+
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::SaveRegistersToMemory(RegList regs) {
   ASSERT((regs & ~kJSCallerSaved) == 0);

@@ -608,6 +618,15 @@ void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
     }
   }
 }

+
+void MacroAssembler::DebugBreak() {
+  ASSERT(allow_stub_calls());
+  mov(r0, Operand(0));
+  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak)));
+  CEntryStub ces(1);
+  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
 #endif

@@ -940,6 +959,113 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object,
 }


+void MacroAssembler::AllocateTwoByteString(Register result,
+                                           Register length,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
+  add(scratch1, scratch1,
+      Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
+  // AllocateInNewSpace expects the size in words, so we can round down
+  // to kObjectAlignment and divide by kPointerSize in the same shift.
+  ASSERT_EQ(kPointerSize, kObjectAlignmentMask + 1);
+  mov(scratch1, Operand(scratch1, ASR, kPointerSizeLog2));
+
+  // Allocate two-byte string in new space.
+  AllocateInNewSpace(scratch1,
+                     result,
+                     scratch2,
+                     scratch3,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  LoadRoot(scratch1, Heap::kStringMapRootIndex);
+  str(length, FieldMemOperand(result, String::kLengthOffset));
+  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+  mov(scratch2, Operand(String::kEmptyHashField));
+  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
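The size computation above is worth unpacking: adding kObjectAlignmentMask before the arithmetic shift right both rounds the byte count up to the next kObjectAlignment boundary and converts bytes to words in one instruction, which only works because kPointerSize == kObjectAlignmentMask + 1. A worked version with an assumed 12-byte header standing in for SeqTwoByteString::kHeaderSize:

    #include <cassert>

    const int kPointerSizeLog2 = 2;
    const int kPointerSize = 4;
    const int kObjectAlignmentMask = kPointerSize - 1;
    const int kHeaderSize = 12;  // assumed, for illustration only

    int SizeInWords(int length) {
      int bytes = length * 2;                            // two-byte chars
      int padded = bytes + kObjectAlignmentMask + kHeaderSize;
      return padded >> kPointerSizeLog2;                 // bytes -> words
    }

    int main() {
      assert(SizeInWords(3) == 5);  // 18 bytes needed -> 20 bytes (5 words)
      assert(SizeInWords(4) == 5);  // 20 bytes needed -> exactly 5 words
      return 0;
    }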
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+                                         Register length,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Register scratch3,
+                                         Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+  ASSERT(kCharSize == 1);
+  add(scratch1, length,
+      Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
+  // AllocateInNewSpace expects the size in words, so we can round down
+  // to kObjectAlignment and divide by kPointerSize in the same shift.
+  ASSERT_EQ(kPointerSize, kObjectAlignmentMask + 1);
+  mov(scratch1, Operand(scratch1, ASR, kPointerSizeLog2));
+
+  // Allocate ASCII string in new space.
+  AllocateInNewSpace(scratch1,
+                     result,
+                     scratch2,
+                     scratch3,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  LoadRoot(scratch1, Heap::kAsciiStringMapRootIndex);
+  mov(scratch1, Operand(Factory::ascii_string_map()));
+  str(length, FieldMemOperand(result, String::kLengthOffset));
+  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+  mov(scratch2, Operand(String::kEmptyHashField));
+  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+                                               Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  AllocateInNewSpace(ConsString::kSize / kPointerSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+  LoadRoot(scratch1, Heap::kConsStringMapRootIndex);
+  mov(scratch2, Operand(String::kEmptyHashField));
+  str(length, FieldMemOperand(result, String::kLengthOffset));
+  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+                                             Register length,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* gc_required) {
+  AllocateInNewSpace(ConsString::kSize / kPointerSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+  LoadRoot(scratch1, Heap::kConsAsciiStringMapRootIndex);
+  mov(scratch2, Operand(String::kEmptyHashField));
+  str(length, FieldMemOperand(result, String::kLengthOffset));
+  str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+  str(scratch2, FieldMemOperand(result, String::kHashFieldOffset));
+}
+
+
 void MacroAssembler::CompareObjectType(Register function,
                                        Register map,
                                        Register type_reg,

@@ -957,6 +1083,21 @@ void MacroAssembler::CompareInstanceType(Register map,
 }


+void MacroAssembler::CheckMap(Register obj,
+                              Register scratch,
+                              Handle<Map> map,
+                              Label* fail,
+                              bool is_heap_object) {
+  if (!is_heap_object) {
+    BranchOnSmi(obj, fail);
+  }
+  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  mov(ip, Operand(map));
+  cmp(scratch, ip);
+  b(ne, fail);
+}
+
+
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,

@@ -1010,10 +1151,17 @@ void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
 }


+void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
+  ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
+  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+}
+
+
 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
-  if (argc > 1)
+  if (argc > 1) {
     add(sp, sp, Operand((argc - 1) * kPointerSize));
+  }
   Ret();
 }

@@ -1037,6 +1185,18 @@ void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
 }


+void MacroAssembler::GetLeastBitsFromSmi(Register dst,
+                                         Register src,
+                                         int num_least_bits) {
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    ubfx(dst, src, Operand(kSmiTagSize), Operand(num_least_bits - 1));
+  } else {
+    mov(dst, Operand(src, ASR, kSmiTagSize));
+    and_(dst, dst, Operand((1 << num_least_bits) - 1));
+  }
+}
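Both branches of GetLeastBitsFromSmi aim at the same result: drop the smi tag bit and keep the low num_least_bits of the untagged value; ARMv7's ubfx does it in a single bitfield-extract instruction, the fallback in a shift plus mask. A sketch of the intended equivalence (tag scheme assumed, the assembler's operand-encoding details elided):

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;

    uint32_t ViaBitfieldExtract(uint32_t smi, int num_least_bits) {
      // ubfx pulls num_least_bits starting at bit kSmiTagSize in one step.
      return (smi >> kSmiTagSize) & ((1u << num_least_bits) - 1);
    }

    uint32_t ViaShiftAndMask(uint32_t smi, int num_least_bits) {
      uint32_t untagged = smi >> kSmiTagSize;          // mov dst, src, ASR #1
      return untagged & ((1u << num_least_bits) - 1);  // and dst, dst, #mask
    }

    int main() {
      uint32_t smi = 45u << kSmiTagSize;  // tagged 45
      assert(ViaBitfieldExtract(smi, 3) == ViaShiftAndMask(smi, 3));
      assert(ViaShiftAndMask(smi, 3) == (45 & 7));
      return 0;
    }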
 void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
   // All parameters are on the stack.  r0 has the return value after call.

@@ -1064,6 +1224,16 @@ void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
 }


+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+                                           int num_arguments) {
+  mov(r0, Operand(num_arguments));
+  mov(r1, Operand(ext));
+
+  CEntryStub stub(1);
+  CallStub(&stub);
+}
+
+
 void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
                                      int num_arguments,
                                      int result_size) {

@@ -1087,58 +1257,28 @@ void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
 }


-Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
-                                            bool* resolved) {
-  // Contract with compiled functions is that the function is passed in r1.
-  int builtins_offset =
-      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
-  ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  ldr(r1, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
-  ldr(r1, FieldMemOperand(r1, builtins_offset));
-
-  return Builtins::GetCode(id, resolved);
-}
-
-
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeJSFlags flags) {
-  bool resolved;
-  Handle<Code> code = ResolveBuiltin(id, &resolved);
-
+  GetBuiltinEntry(r2, id);
   if (flags == CALL_JS) {
-    Call(code, RelocInfo::CODE_TARGET);
+    Call(r2);
   } else {
     ASSERT(flags == JUMP_JS);
-    Jump(code, RelocInfo::CODE_TARGET);
-  }
-
-  if (!resolved) {
-    const char* name = Builtins::GetName(id);
-    int argc = Builtins::GetArgumentsCount(id);
-    uint32_t flags =
-        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsUseCodeObject::encode(false);
-    Unresolved entry = { pc_offset() - kInstrSize, flags, name };
-    unresolved_.Add(entry);
+    Jump(r2);
   }
 }


 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
-  bool resolved;
-  Handle<Code> code = ResolveBuiltin(id, &resolved);
-
-  mov(target, Operand(code));
-  if (!resolved) {
-    const char* name = Builtins::GetName(id);
-    int argc = Builtins::GetArgumentsCount(id);
-    uint32_t flags =
-        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsUseCodeObject::encode(true);
-    Unresolved entry = { pc_offset() - kInstrSize, flags, name };
-    unresolved_.Add(entry);
-  }
-
+  // Load the JavaScript builtin function from the builtins object.
+  ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  ldr(r1, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
+  int builtins_offset =
+      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
+  ldr(r1, FieldMemOperand(r1, builtins_offset));
+  // Load the code entry point from the function into the target register.
+  ldr(target, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  ldr(target, FieldMemOperand(target, SharedFunctionInfo::kCodeOffset));
   add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
 }

@@ -1238,6 +1378,26 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
 }


+void MacroAssembler::JumpIfNotBothSmi(Register reg1,
+                                      Register reg2,
+                                      Label* on_not_both_smi) {
+  ASSERT_EQ(0, kSmiTag);
+  tst(reg1, Operand(kSmiTagMask));
+  tst(reg2, Operand(kSmiTagMask), eq);
+  b(ne, on_not_both_smi);
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1,
+                                     Register reg2,
+                                     Label* on_either_smi) {
+  ASSERT_EQ(0, kSmiTag);
+  tst(reg1, Operand(kSmiTagMask));
+  tst(reg2, Operand(kSmiTagMask), ne);
+  b(eq, on_either_smi);
+}
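JumpIfNotBothSmi leans on ARM conditional execution: the second tst carries the `eq` condition, so it only updates the flags when the first register already looked like a smi; a final `ne` state therefore means at least one register held a non-smi. A sketch of that flag dance in plain C++:

    #include <cassert>

    const unsigned kSmiTagMask = 1;

    bool NotBothSmi(unsigned reg1, unsigned reg2) {
      bool z = (reg1 & kSmiTagMask) == 0;  // tst reg1, #kSmiTagMask
      if (z) {                             // second tst is conditional on "eq"
        z = (reg2 & kSmiTagMask) == 0;     // tst reg2, #kSmiTagMask, eq
      }
      return !z;                           // b ne, on_not_both_smi
    }

    int main() {
      assert(!NotBothSmi(4, 8));  // both tagged smis (low bit clear)
      assert(NotBothSmi(4, 9));   // second is a heap pointer (low bit set)
      assert(NotBothSmi(5, 8));   // first is a heap pointer
      return 0;
    }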
 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
     Register first,
     Register second,

127  deps/v8/src/arm/macro-assembler-arm.h

@@ -33,10 +33,18 @@
 namespace v8 {
 namespace internal {

+// ----------------------------------------------------------------------------
+// Static helper functions
+
+// Generate a MemOperand for loading a field from an object.
+static inline MemOperand FieldMemOperand(Register object, int offset) {
+  return MemOperand(object, offset - kHeapObjectTag);
+}
+
 // Give alias names to registers
 const Register cp = { 8 };  // JavaScript context pointer
+const Register roots = { 10 };  // Roots array pointer.

 enum InvokeJSFlags {
   CALL_JS,

@@ -49,14 +57,7 @@ class MacroAssembler: public Assembler {
  public:
   MacroAssembler(void* buffer, int size);

-  // ---------------------------------------------------------------------------
-  // Low-level helpers for compiler
-
-  // Jump, Call, and Ret pseudo instructions implementing inter-working
- private:
-  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
- public:
+  // Jump, Call, and Ret pseudo instructions implementing inter-working.
   void Jump(Register target, Condition cond = al);
   void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);

@@ -134,6 +135,10 @@ class MacroAssembler: public Assembler {
                       const ParameterCount& actual,
                       InvokeFlag flag);

+  void InvokeFunction(JSFunction* function,
+                      const ParameterCount& actual,
+                      InvokeFlag flag);
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------

@@ -145,6 +150,7 @@ class MacroAssembler: public Assembler {
   void CopyRegistersFromStackToMemory(Register base,
                                       Register scratch,
                                       RegList regs);
+  void DebugBreak();
 #endif

   // ---------------------------------------------------------------------------

@@ -209,6 +215,31 @@ class MacroAssembler: public Assembler {
   // allocation is undone.
   void UndoAllocationInNewSpace(Register object, Register scratch);

+  void AllocateTwoByteString(Register result,
+                             Register length,
+                             Register scratch1,
+                             Register scratch2,
+                             Register scratch3,
+                             Label* gc_required);
+  void AllocateAsciiString(Register result,
+                           Register length,
+                           Register scratch1,
+                           Register scratch2,
+                           Register scratch3,
+                           Label* gc_required);
+  void AllocateTwoByteConsString(Register result,
+                                 Register length,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Label* gc_required);
+  void AllocateAsciiConsString(Register result,
+                               Register length,
+                               Register scratch1,
+                               Register scratch2,
+                               Label* gc_required);
+
   // ---------------------------------------------------------------------------
   // Support functions.

@@ -243,6 +274,29 @@ class MacroAssembler: public Assembler {
                          Register type_reg,
                          InstanceType type);

+  // Check if the map of an object is equal to a specified map and
+  // branch to label if not. Skip the smi check if not required
+  // (object is known to be a heap object).
+  void CheckMap(Register obj,
+                Register scratch,
+                Handle<Map> map,
+                Label* fail,
+                bool is_heap_object);
+
+  // Load and check the instance type of an object for being a string.
+  // Loads the type into the second argument register.
+  // Returns a condition that will be enabled if the object was a string.
+  Condition IsObjectStringType(Register obj,
+                               Register type) {
+    ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+    ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+    tst(type, Operand(kIsNotStringMask));
+    ASSERT_EQ(0, kStringTag);
+    return eq;
+  }
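Returning a Condition instead of branching lets each caller decide how to consume the string check: branch, conditional move, or any conditionally executed instruction, without forcing an extra label. A hypothetical use (sketch only; assumes the NegateCondition helper from the assembler headers):

    // Condition is_string = masm->IsObjectStringType(obj, type);
    // masm->b(NegateCondition(is_string), &not_a_string);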
   inline void BranchOnSmi(Register value, Label* smi_label) {
     tst(value, Operand(kSmiTagMask));
     b(eq, smi_label);

@@ -257,6 +311,9 @@ class MacroAssembler: public Assembler {
   // occurred.
   void IllegalOperation(int num_arguments);

+  // Get the number of least significant bits from a register.
+  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+
   // Uses VFP instructions to Convert a Smi to a double.
   void IntegerToDoubleConversionWithVFP3(Register inReg,
                                          Register outHighReg,

@@ -269,6 +326,9 @@ class MacroAssembler: public Assembler {
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = al);

+  // Tail call a code stub (jump).
+  void TailCallStub(CodeStub* stub, Condition cond = al);
+
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);

@@ -279,6 +339,10 @@ class MacroAssembler: public Assembler {
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId fid, int num_arguments);

+  // Convenience function: call an external reference.
+  void CallExternalReference(const ExternalReference& ext,
+                             int num_arguments);
+
   // Tail call of a runtime routine (jump).
   // Like JumpToRuntime, but also takes care of passing the number
   // of parameters.

@@ -297,13 +361,6 @@ class MacroAssembler: public Assembler {
   // setup the function in r1.
   void GetBuiltinEntry(Register target, Builtins::JavaScript id);

-  struct Unresolved {
-    int pc;
-    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
-    const char* name;
-  };
-  List<Unresolved>* unresolved() { return &unresolved_; }
-
   Handle<Object> CodeObject() { return code_object_; }

@@ -337,6 +394,14 @@ class MacroAssembler: public Assembler {
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }

+  // ---------------------------------------------------------------------------
+  // Smi utilities
+
+  // Jump if either of the registers contains a non-smi.
+  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
+
+  // Jump if either of the registers contains a smi.
+  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+
   // ---------------------------------------------------------------------------
   // String utilities

@@ -357,11 +422,8 @@ class MacroAssembler: public Assembler {
                                            Label* not_flat_ascii_strings);

  private:
-  List<Unresolved> unresolved_;
-  bool generating_stub_;
-  bool allow_stub_calls_;
-  Handle<Object> code_object_;  // This handle will be patched with the code
-                                // object on installation.
+  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,

@@ -371,21 +433,14 @@ class MacroAssembler: public Assembler {
                       Label* done,
                       InvokeFlag flag);

-  // Prepares for a call or jump to a builtin by doing two things:
-  // 1. Emits code that fetches the builtin's function object from the context
-  //    at runtime, and puts it in the register rdi.
-  // 2. Fetches the builtin's code object, and returns it in a handle, at
-  //    compile time, so that later code can emit instructions to jump or call
-  //    the builtin directly.  If the code object has not yet been created, it
-  //    returns the builtin code object for IllegalFunction, and sets the
-  //    output parameter "resolved" to false.  Code that uses the return value
-  //    should then add the address and the builtin name to the list of fixups
-  //    called unresolved_, which is fixed up by the bootstrapper.
-  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
   // Activation support.
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
+
+  bool generating_stub_;
+  bool allow_stub_calls_;
+  // This handle will be patched with the code object on installation.
+  Handle<Object> code_object_;
 };

@@ -421,12 +476,6 @@ class CodePatcher {
 // -----------------------------------------------------------------------------
 // Static helper functions.

-// Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
-  return MemOperand(object, offset - kHeapObjectTag);
-}
-
 #ifdef GENERATED_CODE_COVERAGE
 #define CODE_COVERAGE_STRINGIFY(x) #x
 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)

29  deps/v8/src/arm/simulator-arm.cc

@@ -1741,7 +1741,7 @@ void Simulator::DecodeType2(Instr* instr) {
 void Simulator::DecodeType3(Instr* instr) {
-  ASSERT(instr->Bit(4) == 0);
+  ASSERT(instr->Bits(6, 4) == 0x5 || instr->Bit(4) == 0);
   int rd = instr->RdField();
   int rn = instr->RnField();
   int32_t rn_val = get_register(rn);

@@ -1768,10 +1768,26 @@ void Simulator::DecodeType3(Instr* instr) {
       break;
     }
     case 3: {
-      // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
-      addr = rn_val + shifter_operand;
-      if (instr->HasW()) {
-        set_register(rn, addr);
+      if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
+        uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
+        uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
+        uint32_t msbit = widthminus1 + lsbit;
+        if (msbit <= 31) {
+          uint32_t rm_val =
+              static_cast<uint32_t>(get_register(instr->RmField()));
+          uint32_t extr_val = rm_val << (31 - msbit);
+          extr_val = extr_val >> (31 - widthminus1);
+          set_register(instr->RdField(), extr_val);
+        } else {
+          UNREACHABLE();
+        }
+        return;
+      } else {
+        // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+        addr = rn_val + shifter_operand;
+        if (instr->HasW()) {
+          set_register(rn, addr);
+        }
       }
       break;
     }

@@ -1785,7 +1801,8 @@ void Simulator::DecodeType3(Instr* instr) {
       uint8_t byte = ReadB(addr);
       set_register(rd, byte);
     } else {
-      UNIMPLEMENTED();
+      uint8_t byte = get_register(rd);
+      WriteB(addr, byte);
     }
   } else {
     if (instr->HasL()) {
467  deps/v8/src/arm/stub-cache-arm.cc

@@ -189,8 +189,9 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
 }


-// Generate code to check if an object is a string.  If the object is
-// a string, the map's instance type is left in the scratch1 register.
+// Generate code to check if an object is a string.  If the object is a
+// heap object, its map's instance type is left in the scratch1 register.
+// If this is not needed, scratch1 and scratch2 may be the same register.
 static void GenerateStringCheck(MacroAssembler* masm,
                                 Register receiver,
                                 Register scratch1,

@@ -215,18 +216,16 @@ static void GenerateStringCheck(MacroAssembler* masm,
 // If the receiver object is not a string or a wrapped string object the
 // execution continues at the miss label. The register containing the
 // receiver is potentially clobbered.
-void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
-                                             Register receiver,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Label* miss) {
-  Label check_string, check_wrapper;
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+                                            Register receiver,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Label* miss) {
+  Label check_wrapper;

-  __ bind(&check_string);
   // Check if the object is a string leaving the instance type in the
   // scratch1 register.
-  GenerateStringCheck(masm, receiver, scratch1, scratch2,
-                      miss, &check_wrapper);
+  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);

   // Load length directly from the string.
   __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));

@@ -238,9 +237,12 @@ void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
   __ cmp(scratch1, Operand(JS_VALUE_TYPE));
   __ b(ne, miss);

-  // Unwrap the value in place and check if the wrapped value is a string.
-  __ ldr(receiver, FieldMemOperand(receiver, JSValue::kValueOffset));
-  __ b(&check_string);
+  // Unwrap the value and check if the wrapped value is a string.
+  __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+  GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+  __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
+  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+  __ Ret();
 }

@@ -256,10 +258,10 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
 // Generate StoreField code, value is passed in r0 register.
-// After executing generated code, the receiver_reg and name_reg
-// may be clobbered.
+// When leaving generated code after success, the receiver_reg and name_reg
+// may be clobbered.  Upon branch to miss_label, the receiver and name
+// registers have their original values.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      Builtins::Name storage_extend,
                                       JSObject* object,
                                       int index,
                                       Map* transition,

@@ -292,11 +294,12 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
   if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
+    __ push(receiver_reg);
     __ mov(r2, Operand(Handle<Map>(transition)));
-    // Please note, if we implement keyed store for arm we need
-    // to call the Builtins::KeyedStoreIC_ExtendStorage.
-    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_ExtendStorage));
-    __ Jump(ic, RelocInfo::CODE_TARGET);
+    __ stm(db_w, sp, r2.bit() | r0.bit());
+    __ TailCallRuntime(
+        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)),
+        3, 1);
     return;
   }

@@ -373,7 +376,7 @@ static void GenerateCallFunction(MacroAssembler* masm,
   // Check that the function really is a function.
   __ BranchOnSmi(r1, miss);
-  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
   __ b(ne, miss);

   // Patch the receiver on the stack with the global proxy if

@@ -388,68 +391,6 @@ static void GenerateCallFunction(MacroAssembler* masm,
 }


-static void GenerateCallConstFunction(MacroAssembler* masm,
-                                      JSFunction* function,
-                                      const ParameterCount& arguments) {
-  ASSERT(function->is_compiled());
-
-  // Get the function and setup the context.
-  __ mov(r1, Operand(Handle<JSFunction>(function)));
-  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
-  // Jump to the cached code (tail call).
-  Handle<Code> code(function->code());
-  ParameterCount expected(function->shared()->formal_parameter_count());
-  __ InvokeCode(code, expected, arguments,
-                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
-}
-
-
-template <class Compiler>
-static void CompileLoadInterceptor(Compiler* compiler,
-                                   StubCompiler* stub_compiler,
-                                   MacroAssembler* masm,
-                                   JSObject* object,
-                                   JSObject* holder,
-                                   String* name,
-                                   LookupResult* lookup,
-                                   Register receiver,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Label* miss) {
-  ASSERT(holder->HasNamedInterceptor());
-  ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
-  // Check that the receiver isn't a smi.
-  __ BranchOnSmi(receiver, miss);
-
-  // Check that the maps haven't changed.
-  Register reg =
-      stub_compiler->CheckPrototypes(object, receiver, holder,
-                                     scratch1, scratch2, name, miss);
-
-  if (lookup->IsValid() && lookup->IsCacheable()) {
-    compiler->CompileCacheable(masm,
-                               stub_compiler,
-                               receiver,
-                               reg,
-                               scratch1,
-                               scratch2,
-                               holder,
-                               lookup,
-                               name,
-                               miss);
-  } else {
-    compiler->CompileRegular(masm,
-                             receiver,
-                             reg,
-                             scratch2,
-                             holder,
-                             miss);
-  }
-}
-
-
 static void PushInterceptorArguments(MacroAssembler* masm,
                                      Register receiver,
                                      Register holder,
@@ -500,7 +441,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
                         LookupResult* lookup,
                         String* name,
                         Label* miss_label) {
-    AccessorInfo* callback = 0;
+    AccessorInfo* callback = NULL;
     bool optimize = false;
     // So far the most popular follow ups for interceptor loads are FIELD
     // and CALLBACKS, so inline only them, other cases may be added

@@ -523,9 +464,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
     // Note: starting a frame here makes GC aware of pointers pushed below.
     __ EnterInternalFrame();

-    if (lookup->type() == CALLBACKS) {
-      __ push(receiver);
-    }
+    __ push(receiver);
     __ push(holder);
     __ push(name_);

@@ -546,10 +485,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
     __ bind(&interceptor_failed);
     __ pop(name_);
     __ pop(holder);
-
-    if (lookup->type() == CALLBACKS) {
-      __ pop(receiver);
-    }
+    __ pop(receiver);

     __ LeaveInternalFrame();

@@ -621,108 +557,48 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
 };


-class CallInterceptorCompiler BASE_EMBEDDED {
- public:
-  CallInterceptorCompiler(const ParameterCount& arguments, Register name)
-      : arguments_(arguments), argc_(arguments.immediate()), name_(name) {}
-
-  void CompileCacheable(MacroAssembler* masm,
-                        StubCompiler* stub_compiler,
-                        Register receiver,
-                        Register holder,
-                        Register scratch1,
-                        Register scratch2,
-                        JSObject* holder_obj,
-                        LookupResult* lookup,
-                        String* name,
-                        Label* miss_label) {
-    JSFunction* function = 0;
-    bool optimize = false;
-    // So far the most popular case for failed interceptor is
-    // CONSTANT_FUNCTION sitting below.
-    if (lookup->type() == CONSTANT_FUNCTION) {
-      function = lookup->GetConstantFunction();
-      // JSArray holder is a special case for call constant function
-      // (see the corresponding code).
-      if (function->is_compiled() && !holder_obj->IsJSArray()) {
-        optimize = true;
-      }
-    }
-
-    if (!optimize) {
-      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
-      return;
-    }
-
-    // Constant functions cannot sit on global object.
-    ASSERT(!lookup->holder()->IsGlobalObject());
-
-    __ EnterInternalFrame();
-    __ push(holder);  // Save the holder.
-    __ push(name_);  // Save the name.
-
-    CompileCallLoadPropertyWithInterceptor(masm,
-                                           receiver,
-                                           holder,
-                                           name_,
-                                           holder_obj);
-
-    ASSERT(!r0.is(name_));
-    ASSERT(!r0.is(scratch1));
-    __ pop(name_);  // Restore the name.
-    __ pop(scratch1);  // Restore the holder.
-    __ LeaveInternalFrame();
-
-    // Compare with no_interceptor_result_sentinel.
-    __ LoadRoot(scratch2, Heap::kNoInterceptorResultSentinelRootIndex);
-    __ cmp(r0, scratch2);
-    Label invoke;
-    __ b(ne, &invoke);
-
-    stub_compiler->CheckPrototypes(holder_obj, scratch1,
-                                   lookup->holder(), scratch1,
-                                   scratch2,
-                                   name,
-                                   miss_label);
-    GenerateCallConstFunction(masm, function, arguments_);
-
-    __ bind(&invoke);
-  }
-
-  void CompileRegular(MacroAssembler* masm,
-                      Register receiver,
-                      Register holder,
-                      Register scratch,
-                      JSObject* holder_obj,
-                      Label* miss_label) {
-    __ EnterInternalFrame();
-    // Save the name_ register across the call.
-    __ push(name_);
-
-    PushInterceptorArguments(masm,
-                             receiver,
-                             holder,
-                             name_,
-                             holder_obj);
-
-    ExternalReference ref = ExternalReference(
-        IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
-    __ mov(r0, Operand(5));
-    __ mov(r1, Operand(ref));
-
-    CEntryStub stub(1);
-    __ CallStub(&stub);
-
-    // Restore the name_ register.
-    __ pop(name_);
-    __ LeaveInternalFrame();
-  }
-
- private:
-  const ParameterCount& arguments_;
-  int argc_;
-  Register name_;
-};
+static void CompileLoadInterceptor(LoadInterceptorCompiler* compiler,
+                                   StubCompiler* stub_compiler,
+                                   MacroAssembler* masm,
+                                   JSObject* object,
+                                   JSObject* holder,
+                                   String* name,
+                                   LookupResult* lookup,
+                                   Register receiver,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Label* miss) {
+  ASSERT(holder->HasNamedInterceptor());
+  ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Check that the receiver isn't a smi.
+  __ BranchOnSmi(receiver, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      stub_compiler->CheckPrototypes(object, receiver, holder,
+                                     scratch1, scratch2, name, miss);
+
+  if (lookup->IsProperty() && lookup->IsCacheable()) {
+    compiler->CompileCacheable(masm,
+                               stub_compiler,
+                               receiver,
+                               reg,
+                               scratch1,
+                               scratch2,
+                               holder,
+                               lookup,
+                               name,
+                               miss);
+  } else {
+    compiler->CompileRegular(masm,
+                             receiver,
+                             reg,
+                             scratch2,
+                             holder,
+                             miss);
+  }
+}

 #undef __
@@ -735,7 +611,11 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
                                        Register holder_reg,
                                        Register scratch,
                                        String* name,
+                                       int save_at_depth,
                                        Label* miss) {
+  // TODO(602): support object saving.
+  ASSERT(save_at_depth == kInvalidProtoDepth);
+
   // Check that the maps haven't changed.
   Register result =
       masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);

@@ -762,7 +642,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object,
     object = JSObject::cast(object->GetPrototype());
   }

-  // Return the register containin the holder.
+  // Return the register containing the holder.
   return result;
 }

@@ -901,12 +781,13 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
 }


-Object* CallStubCompiler::CompileCallField(Object* object,
+Object* CallStubCompiler::CompileCallField(JSObject* object,
                                            JSObject* holder,
                                            int index,
                                            String* name) {
   // ----------- S t a t e -------------
-  // -- lr: return address
+  // -- r2    : name
+  // -- lr    : return address
   // -----------------------------------
   Label miss;

@@ -919,8 +800,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
   __ b(eq, &miss);

   // Do the right check and compute the holder register.
-  Register reg =
-      CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss);
+  Register reg = CheckPrototypes(object, r0, holder, r1, r3, name, &miss);
   GenerateFastPropertyLoad(masm(), r1, reg, holder, index);

   GenerateCallFunction(masm(), object, arguments(), &miss);

@@ -941,7 +821,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               String* name,
                                               CheckType check) {
   // ----------- S t a t e -------------
-  // -- lr: return address
+  // -- r2    : name
+  // -- lr    : return address
   // -----------------------------------
   Label miss;

@@ -962,7 +843,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
   switch (check) {
     case RECEIVER_MAP_CHECK:
       // Check that the maps haven't changed.
-      CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
+      CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);

       // Patch the receiver on the stack with the global proxy if
       // necessary.

@@ -978,13 +859,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
         __ jmp(&miss);
       } else {
         // Check that the object is a two-byte string or a symbol.
-        __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+        __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
         __ b(hs, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
-                                            r2);
-        CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+                                            r0);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                         r1, name, &miss);
       }
       break;

@@ -998,14 +879,14 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
       // Check that the object is a smi or a heap number.
       __ tst(r1, Operand(kSmiTagMask));
       __ b(eq, &fast);
-      __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
+      __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
       __ b(ne, &miss);
       __ bind(&fast);
       // Check that the maps starting from the prototype haven't changed.
       GenerateLoadGlobalFunctionPrototype(masm(),
                                           Context::NUMBER_FUNCTION_INDEX,
-                                          r2);
-      CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+                                          r0);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                       r1, name, &miss);
     }
     break;

@@ -1028,22 +909,22 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
       // Check that the maps starting from the prototype haven't changed.
       GenerateLoadGlobalFunctionPrototype(masm(),
                                           Context::BOOLEAN_FUNCTION_INDEX,
-                                          r2);
-      CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+                                          r0);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                       r1, name, &miss);
     }
     break;
   }

   case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
-    CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
+    CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
     // Make sure object->HasFastElements().
     // Get the elements array of the object.
     __ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
     // Check that the object is in fast mode (not dictionary).
-    __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+    __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
     __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-    __ cmp(r2, ip);
+    __ cmp(r0, ip);
     __ b(ne, &miss);
     break;

@@ -1051,7 +932,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
       UNREACHABLE();
   }

-  GenerateCallConstFunction(masm(), function, arguments());
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);

   // Handle call cache miss.
   __ bind(&miss);
@@ -1067,14 +948,22 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
 }


-Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
                                                  JSObject* holder,
                                                  String* name) {
   // ----------- S t a t e -------------
-  // -- lr: return address
+  // -- r2    : name
+  // -- lr    : return address
   // -----------------------------------
+  ASSERT(holder->HasNamedInterceptor());
+  ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
   Label miss;

+  const Register receiver = r0;
+  const Register holder_reg = r1;
+  const Register name_reg = r2;
+  const Register scratch = r3;
+
   // Get the number of arguments.
   const int argc = arguments().immediate();

@@ -1083,24 +972,79 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
   // Get the receiver from the stack into r0.
   __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-  // Load the name from the stack into r1.
-  __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));

-  CallInterceptorCompiler compiler(arguments(), r1);
-  CompileLoadInterceptor(&compiler,
-                         this,
-                         masm(),
-                         JSObject::cast(object),
-                         holder,
-                         name,
-                         &lookup,
-                         r0,
-                         r2,
-                         r3,
-                         &miss);
+  // Check that the receiver isn't a smi.
+  __ BranchOnSmi(receiver, &miss);
+
+  // Check that the maps haven't changed.
+  Register reg = CheckPrototypes(object, receiver, holder, holder_reg,
+                                 scratch, name, &miss);
+  if (!reg.is(holder_reg)) {
+    __ mov(holder_reg, reg);
+  }
+
+  // If we call a constant function when the interceptor returns
+  // the no-result sentinel, generate code that optimizes this case.
+  if (lookup.IsProperty() &&
+      lookup.IsCacheable() &&
+      lookup.type() == CONSTANT_FUNCTION &&
+      lookup.GetConstantFunction()->is_compiled() &&
+      !holder->IsJSArray()) {
+    // Constant functions cannot sit on global object.
+    ASSERT(!lookup.holder()->IsGlobalObject());
+
+    // Call the interceptor.
+    __ EnterInternalFrame();
+    __ push(holder_reg);
+    __ push(name_reg);
+    CompileCallLoadPropertyWithInterceptor(masm(),
+                                           receiver,
+                                           holder_reg,
+                                           name_reg,
+                                           holder);
+    __ pop(name_reg);
+    __ pop(holder_reg);
+    __ LeaveInternalFrame();
+    // r0 no longer contains the receiver.
+
+    // If interceptor returns the no-result sentinel, call the constant
+    // function.
+    __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ cmp(r0, scratch);
+    Label invoke;
+    __ b(ne, &invoke);
+    // Check the prototypes between the interceptor's holder and the
+    // constant function's holder.
+    CheckPrototypes(holder, holder_reg,
+                    lookup.holder(), r0,
+                    scratch,
+                    name,
+                    &miss);
+
+    __ InvokeFunction(lookup.GetConstantFunction(),
+                      arguments(),
+                      JUMP_FUNCTION);
+
+    __ bind(&invoke);
+  } else {
+    // Call a runtime function to load the interceptor property.
+    __ EnterInternalFrame();
+    __ push(name_reg);
+
+    PushInterceptorArguments(masm(), receiver, holder_reg, name_reg, holder);
+
+    __ CallExternalReference(
+        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
+        5);
+
+    __ pop(name_reg);
+    __ LeaveInternalFrame();
+  }
+
+  // Move returned value, the function to call, to r1.
+  __ mov(r1, r0);
   // Restore receiver.
-  __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+  __ ldr(receiver, MemOperand(sp, argc * kPointerSize));

   GenerateCallFunction(masm(), object, arguments(), &miss);
@ -1120,7 +1064,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
JSFunction* function, JSFunction* function,
String* name) { String* name) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- lr: return address // -- r2 : name
// -- lr : return address
// ----------------------------------- // -----------------------------------
Label miss; Label miss;
@ -1139,7 +1084,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
} }
// Check that the maps haven't changed. // Check that the maps haven't changed.
CheckPrototypes(object, r0, holder, r3, r2, name, &miss); CheckPrototypes(object, r0, holder, r3, r1, name, &miss);
// Get the value from the cell. // Get the value from the cell.
__ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell))); __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
@ -1159,8 +1104,8 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Check the shared function info. Make sure it hasn't changed. // Check the shared function info. Make sure it hasn't changed.
__ mov(r3, Operand(Handle<SharedFunctionInfo>(function->shared()))); __ mov(r3, Operand(Handle<SharedFunctionInfo>(function->shared())));
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ cmp(r2, r3); __ cmp(r4, r3);
__ b(ne, &miss); __ b(ne, &miss);
} else { } else {
__ cmp(r1, Operand(Handle<JSFunction>(function))); __ cmp(r1, Operand(Handle<JSFunction>(function)));
@@ -1178,7 +1123,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
-__ IncrementCounter(&Counters::call_global_inline, 1, r2, r3);
__ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -1202,25 +1147,19 @@ Object* StoreStubCompiler::CompileStoreField(JSObject* object,
String* name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
-// -- [sp] : receiver
// -----------------------------------
Label miss;
-// Get the receiver from the stack.
-__ ldr(r3, MemOperand(sp, 0 * kPointerSize));
-// name register might be clobbered.
GenerateStoreField(masm(),
-Builtins::StoreIC_ExtendStorage,
object,
index,
transition,
-r3, r2, r1,
r1, r2, r3,
&miss);
__ bind(&miss);
-__ mov(r2, Operand(Handle<String>(name))); // restore name
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1234,39 +1173,33 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
String* name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
-// -- [sp] : receiver
// -----------------------------------
Label miss;
-// Get the object from the stack.
-__ ldr(r3, MemOperand(sp, 0 * kPointerSize));
// Check that the object isn't a smi.
-__ tst(r3, Operand(kSmiTagMask));
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss);
// Check that the map of the object hasn't changed.
-__ ldr(r1, FieldMemOperand(r3, HeapObject::kMapOffset));
-__ cmp(r1, Operand(Handle<Map>(object->map())));
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r3, Operand(Handle<Map>(object->map())));
__ b(ne, &miss);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
-__ CheckAccessGlobalProxy(r3, r1, &miss);
__ CheckAccessGlobalProxy(r1, r3, &miss);
}
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-__ ldr(ip, MemOperand(sp)); // receiver
-__ push(ip);
__ push(r1); // receiver
__ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback info
-__ push(ip);
-__ push(r2); // name
-__ push(r0); // value
__ stm(db_w, sp, ip.bit() | r2.bit() | r0.bit());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
@@ -1287,37 +1220,33 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
String* name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
-// -- [sp] : receiver
// -----------------------------------
Label miss;
-// Get the object from the stack.
-__ ldr(r3, MemOperand(sp, 0 * kPointerSize));
// Check that the object isn't a smi.
-__ tst(r3, Operand(kSmiTagMask));
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss);
// Check that the map of the object hasn't changed.
-__ ldr(r1, FieldMemOperand(r3, HeapObject::kMapOffset));
-__ cmp(r1, Operand(Handle<Map>(receiver->map())));
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r3, Operand(Handle<Map>(receiver->map())));
__ b(ne, &miss);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {
-__ CheckAccessGlobalProxy(r3, r1, &miss);
__ CheckAccessGlobalProxy(r1, r3, &miss);
}
-// Stub never generated for non-global objects that require access
// Stub is never generated for non-global objects that require access
// checks.
ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
-__ ldr(ip, MemOperand(sp)); // receiver
-__ push(ip);
__ push(r1); // receiver.
__ push(r2); // name.
__ push(r0); // value.
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
@@ -1339,14 +1268,13 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
String* name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
// -- r2 : name
// -- lr : return address
-// -- [sp] : receiver
// -----------------------------------
Label miss;
// Check that the map of the global has not changed.
-__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r3, Operand(Handle<Map>(object->map())));
__ b(ne, &miss);

@@ -1355,12 +1283,12 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
__ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
__ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-__ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
__ IncrementCounter(&Counters::named_store_global_inline, 1, r4, r3);
__ Ret();
// Handle store cache miss.
__ bind(&miss);
-__ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r1, r3);
__ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r4, r3);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1672,7 +1600,7 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ cmp(r2, Operand(Handle<String>(name)));
__ b(ne, &miss);
-GenerateLoadStringLength2(masm(), r0, r1, r3, &miss);
GenerateLoadStringLength(masm(), r0, r1, r3, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);
@@ -1717,7 +1645,6 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
__ ldr(r3, MemOperand(sp));
// r1 is used as scratch register, r3 and r2 might be clobbered.
GenerateStoreField(masm(),
-Builtins::StoreIC_ExtendStorage,
object,
index,
transition,

12
deps/v8/src/arm/virtual-frame-arm.cc

@@ -47,7 +47,7 @@ VirtualFrame::VirtualFrame()
: elements_(parameter_count() + local_count() + kPreallocatedElements),
stack_pointer_(parameter_count()) { // 0-based index of TOS.
for (int i = 0; i <= stack_pointer_; i++) {
-elements_.Add(FrameElement::MemoryElement());
elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
}
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
register_locations_[i] = kIllegalIndex;
@@ -233,6 +233,14 @@ void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
}
#ifdef ENABLE_DEBUGGER_SUPPORT
void VirtualFrame::DebugBreak() {
ASSERT(cgen()->HasValidEntryRegisters());
__ DebugBreak();
}
#endif
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
int arg_count) {
@@ -305,7 +313,7 @@ void VirtualFrame::EmitPop(Register reg) {
void VirtualFrame::EmitPush(Register reg) {
ASSERT(stack_pointer_ == element_count() - 1);
-elements_.Add(FrameElement::MemoryElement());
elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
stack_pointer_++;
__ push(reg);
}

9
deps/v8/src/arm/virtual-frame-arm.h

@@ -68,7 +68,8 @@ class VirtualFrame : public ZoneObject {
MacroAssembler* masm() { return cgen()->masm(); }
// Create a duplicate of an existing valid frame element.
-FrameElement CopyElementAt(int index);
FrameElement CopyElementAt(int index,
NumberInfo::Type info = NumberInfo::kUnknown);
// The number of elements on the virtual frame.
int element_count() { return elements_.length(); }
@@ -297,6 +298,10 @@ class VirtualFrame : public ZoneObject {
void CallRuntime(Runtime::Function* f, int arg_count);
void CallRuntime(Runtime::FunctionId id, int arg_count);
#ifdef ENABLE_DEBUGGER_SUPPORT
void DebugBreak();
#endif
// Invoke builtin given the number of arguments it expects on (and
// removes from) the stack.
void InvokeBuiltin(Builtins::JavaScript id,
@@ -339,7 +344,7 @@ class VirtualFrame : public ZoneObject {
void EmitPushMultiple(int count, int src_regs);
// Push an element on the virtual frame.
-void Push(Register reg);
void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
void Push(Handle<Object> value);
void Push(Smi* value) { Push(Handle<Object>(value)); }

5
deps/v8/src/array.js

@@ -566,10 +566,11 @@ function ArraySlice(start, end) {
function ArraySplice(start, delete_count) {
var num_arguments = %_ArgumentsLength();
-// SpiderMonkey and KJS return undefined in the case where no
// SpiderMonkey and JSC return undefined in the case where no
// arguments are given instead of using the implicit undefined
// arguments. This does not follow ECMA-262, but we do the same for
// compatibility.
// TraceMonkey follows ECMA-262 though.
if (num_arguments == 0) return;
var len = TO_UINT32(this.length);
@@ -582,7 +583,7 @@ function ArraySplice(start, delete_count) {
if (start_i > len) start_i = len;
}
-// SpiderMonkey and KJS treat the case where no delete count is
// SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
// given differently from when an undefined delete count is given.
// This does not follow ECMA-262, but we do the same for
// compatibility.

10
deps/v8/src/assembler.cc

@@ -430,6 +430,11 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "code target (js construct call)";
case RelocInfo::CODE_TARGET_CONTEXT:
return "code target (context)";
case RelocInfo::DEBUG_BREAK:
#ifndef ENABLE_DEBUGGER_SUPPORT
UNREACHABLE();
#endif
return "debug break";
case RelocInfo::CODE_TARGET:
return "code target";
case RelocInfo::RUNTIME_ENTRY:
@@ -485,6 +490,11 @@ void RelocInfo::Verify() {
case EMBEDDED_OBJECT:
Object::VerifyPointer(target_object());
break;
case DEBUG_BREAK:
#ifndef ENABLE_DEBUGGER_SUPPORT
UNREACHABLE();
break;
#endif
case CONSTRUCT_CALL:
case CODE_TARGET_CONTEXT:
case CODE_TARGET: {

16
deps/v8/src/assembler.h

@@ -119,6 +119,7 @@ class RelocInfo BASE_EMBEDDED {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
CODE_TARGET_CONTEXT, // code target used for contextual loads.
DEBUG_BREAK,
CODE_TARGET, // code target which is not any of the above.
EMBEDDED_OBJECT,
EMBEDDED_STRING,
@@ -506,8 +507,10 @@ static inline bool is_intn(int x, int n) {
return -(1 << (n-1)) <= x && x < (1 << (n-1));
}

-static inline bool is_int24(int x) { return is_intn(x, 24); }
static inline bool is_int8(int x) { return is_intn(x, 8); }
static inline bool is_int16(int x) { return is_intn(x, 16); }
static inline bool is_int18(int x) { return is_intn(x, 18); }
static inline bool is_int24(int x) { return is_intn(x, 24); }

static inline bool is_uintn(int x, int n) {
return (x & -(1 << n)) == 0;
@@ -519,9 +522,20 @@ static inline bool is_uint4(int x) { return is_uintn(x, 4); }
static inline bool is_uint5(int x) { return is_uintn(x, 5); }
static inline bool is_uint6(int x) { return is_uintn(x, 6); }
static inline bool is_uint8(int x) { return is_uintn(x, 8); }
static inline bool is_uint10(int x) { return is_uintn(x, 10); }
static inline bool is_uint12(int x) { return is_uintn(x, 12); }
static inline bool is_uint16(int x) { return is_uintn(x, 16); }
static inline bool is_uint24(int x) { return is_uintn(x, 24); }
static inline bool is_uint26(int x) { return is_uintn(x, 26); }
static inline bool is_uint28(int x) { return is_uintn(x, 28); }
static inline int NumberOfBitsSet(uint32_t x) {
unsigned int num_bits_set;
for (num_bits_set = 0; x; x >>= 1) {
num_bits_set += x & 1;
}
return num_bits_set;
}
} } // namespace v8::internal
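Note: the semantics of the new range predicates and the bit-counting helper (presumably for the ARM register-list encodings elsewhere in this commit) are easy to check in isolation. A minimal standalone C++ sketch, independent of the V8 headers; the values in main() are illustrative only:

    #include <cassert>
    #include <cstdint>

    // Mirrors the helpers added above: n-bit signed/unsigned range
    // checks and a simple population count.
    static inline bool is_intn(int x, int n) {
      return -(1 << (n - 1)) <= x && x < (1 << (n - 1));
    }
    static inline bool is_uintn(int x, int n) {
      return (x & -(1 << n)) == 0;
    }
    static inline int NumberOfBitsSet(uint32_t x) {
      int num_bits_set = 0;
      for (; x != 0; x >>= 1) num_bits_set += x & 1;
      return num_bits_set;
    }

    int main() {
      assert(is_intn(-131072, 18) && !is_intn(131072, 18));  // 18-bit signed range
      assert(is_uintn(1023, 10) && !is_uintn(1024, 10));     // 10-bit unsigned range
      assert(NumberOfBitsSet(0xF0F0u) == 8);                 // 8 bits set
      return 0;
    }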

39
deps/v8/src/ast.h

@@ -102,6 +102,7 @@ namespace internal {
// Forward declarations
class TargetCollector;
class MaterializedLiteral;
class DefinitionInfo;

#define DEF_FORWARD_DECLARATION(type) class type;
AST_NODE_LIST(DEF_FORWARD_DECLARATION)
@@ -182,7 +183,7 @@ class Expression: public AstNode {
static const int kNoLabel = -1;
-Expression() : num_(kNoLabel) {}
Expression() : num_(kNoLabel), def_(NULL), defined_vars_(NULL) {}
virtual Expression* AsExpression() { return this; }
@@ -193,6 +194,11 @@ class Expression: public AstNode {
// names because [] for string objects is handled only by keyed ICs.
virtual bool IsPropertyName() { return false; }
// True if the expression does not have (evaluated) subexpressions.
// Function literals are leaves because their subexpressions are not
// evaluated.
virtual bool IsLeaf() { return false; }
// Mark the expression as being compiled as an expression
// statement. This is used to transform postfix increments to
// (faster) prefix increments.
@@ -206,9 +212,20 @@ class Expression: public AstNode {
// AST node numbering ordered by evaluation order.
void set_num(int n) { num_ = n; }
// Data flow information.
DefinitionInfo* var_def() { return def_; }
void set_var_def(DefinitionInfo* def) { def_ = def; }
ZoneList<DefinitionInfo*>* defined_vars() { return defined_vars_; }
void set_defined_vars(ZoneList<DefinitionInfo*>* defined_vars) {
defined_vars_ = defined_vars;
}
private:
StaticType type_;
int num_;
DefinitionInfo* def_;
ZoneList<DefinitionInfo*>* defined_vars_;
};
@@ -720,6 +737,8 @@ class Literal: public Expression {
return false;
}
virtual bool IsLeaf() { return true; }
// Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
@@ -802,6 +821,8 @@ class ObjectLiteral: public MaterializedLiteral {
virtual ObjectLiteral* AsObjectLiteral() { return this; }
virtual void Accept(AstVisitor* v);
virtual bool IsLeaf() { return properties()->is_empty(); }
Handle<FixedArray> constant_properties() const {
return constant_properties_;
}
@@ -825,6 +846,8 @@ class RegExpLiteral: public MaterializedLiteral {
virtual void Accept(AstVisitor* v);
virtual bool IsLeaf() { return true; }
Handle<String> pattern() const { return pattern_; }
Handle<String> flags() const { return flags_; }
@@ -849,6 +872,8 @@ class ArrayLiteral: public MaterializedLiteral {
virtual void Accept(AstVisitor* v);
virtual ArrayLiteral* AsArrayLiteral() { return this; }
virtual bool IsLeaf() { return values()->is_empty(); }
Handle<FixedArray> constant_elements() const { return constant_elements_; }
ZoneList<Expression*>* values() const { return values_; }
@@ -896,6 +921,11 @@ class VariableProxy: public Expression {
return var_ == NULL ? true : var_->IsValidLeftHandSide();
}
virtual bool IsLeaf() {
ASSERT(var_ != NULL); // Variable must be resolved.
return var()->is_global() || var()->rewrite()->IsLeaf();
}
bool IsVariable(Handle<String> n) {
return !is_this() && name().is_identical_to(n);
}
@@ -981,6 +1011,8 @@ class Slot: public Expression {
// Type testing & conversion
virtual Slot* AsSlot() { return this; }
virtual bool IsLeaf() { return true; }
// Accessors
Variable* var() const { return var_; }
Type type() const { return type_; }
@@ -1337,6 +1369,8 @@ class FunctionLiteral: public Expression {
// Type testing & conversion
virtual FunctionLiteral* AsFunctionLiteral() { return this; }
virtual bool IsLeaf() { return true; }
Handle<String> name() const { return name_; }
Scope* scope() const { return scope_; }
ZoneList<Statement*>* body() const { return body_; }
@@ -1403,6 +1437,8 @@ class FunctionBoilerplateLiteral: public Expression {
Handle<JSFunction> boilerplate() const { return boilerplate_; }
virtual bool IsLeaf() { return true; }
virtual void Accept(AstVisitor* v);

private:
@@ -1413,6 +1449,7 @@ class FunctionBoilerplateLiteral: public Expression {
class ThisFunction: public Expression {
public:
virtual void Accept(AstVisitor* v);
virtual bool IsLeaf() { return true; }
};

177
deps/v8/src/bootstrapper.cc

@@ -192,116 +192,6 @@ void Bootstrapper::TearDown() {
}
// Pending fixups are code positions that refer to builtin code
// objects that were not available at the time the code was generated.
// The pending list is processed whenever an environment has been
// created.
class PendingFixups : public AllStatic {
public:
static void Add(Code* code, MacroAssembler* masm);
static bool Process(Handle<JSBuiltinsObject> builtins);
static void Iterate(ObjectVisitor* v);
private:
static List<Object*> code_;
static List<const char*> name_;
static List<int> pc_;
static List<uint32_t> flags_;
static void Clear();
};
List<Object*> PendingFixups::code_(0);
List<const char*> PendingFixups::name_(0);
List<int> PendingFixups::pc_(0);
List<uint32_t> PendingFixups::flags_(0);
void PendingFixups::Add(Code* code, MacroAssembler* masm) {
// Note this code is not only called during bootstrapping.
List<MacroAssembler::Unresolved>* unresolved = masm->unresolved();
int n = unresolved->length();
for (int i = 0; i < n; i++) {
const char* name = unresolved->at(i).name;
code_.Add(code);
name_.Add(name);
pc_.Add(unresolved->at(i).pc);
flags_.Add(unresolved->at(i).flags);
LOG(StringEvent("unresolved", name));
}
}
bool PendingFixups::Process(Handle<JSBuiltinsObject> builtins) {
HandleScope scope;
// NOTE: Extra fixups may be added to the list during the iteration
// due to lazy compilation of functions during the processing. Do not
// cache the result of getting the length of the code list.
for (int i = 0; i < code_.length(); i++) {
const char* name = name_[i];
uint32_t flags = flags_[i];
Handle<String> symbol = Factory::LookupAsciiSymbol(name);
Object* o = builtins->GetProperty(*symbol);
#ifdef DEBUG
if (!o->IsJSFunction()) {
V8_Fatal(__FILE__, __LINE__, "Cannot resolve call to builtin %s", name);
}
#endif
Handle<SharedFunctionInfo> shared(JSFunction::cast(o)->shared());
// Make sure the number of parameters match the formal parameter count.
int argc = Bootstrapper::FixupFlagsArgumentsCount::decode(flags);
USE(argc);
ASSERT(shared->formal_parameter_count() == argc);
// Do lazy compilation if necessary and check for stack overflows.
if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) {
Clear();
return false;
}
Code* code = Code::cast(code_[i]);
Address pc = code->instruction_start() + pc_[i];
RelocInfo target(pc, RelocInfo::CODE_TARGET, 0);
bool use_code_object = Bootstrapper::FixupFlagsUseCodeObject::decode(flags);
if (use_code_object) {
target.set_target_object(shared->code());
} else {
target.set_target_address(shared->code()->instruction_start());
}
LOG(StringEvent("resolved", name));
}
Clear();
// TODO(1240818): We should probably try to avoid doing this for all
// the V8 builtin JS files. It should only happen after running
// runtime.js - just like there shouldn't be any fixups left after
// that.
for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
Handle<String> name = Factory::LookupAsciiSymbol(Builtins::GetName(id));
JSFunction* function = JSFunction::cast(builtins->GetProperty(*name));
builtins->set_javascript_builtin(id, function);
}
return true;
}
void PendingFixups::Clear() {
code_.Clear();
name_.Clear();
pc_.Clear();
flags_.Clear();
}
void PendingFixups::Iterate(ObjectVisitor* v) {
if (!code_.is_empty()) {
v->VisitPointers(&code_[0], &code_[0] + code_.length());
}
}
class Genesis BASE_EMBEDDED {
public:
Genesis(Handle<Object> global_object,
@@ -338,6 +228,7 @@ class Genesis BASE_EMBEDDED {
bool InstallExtension(const char* name);
bool InstallExtension(v8::RegisteredExtension* current);
bool InstallSpecialObjects();
bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
bool ConfigureApiObject(Handle<JSObject> object,
Handle<ObjectTemplateInfo> object_template);
bool ConfigureGlobalObjects(v8::Handle<v8::ObjectTemplate> global_template);
@@ -379,15 +270,6 @@ void Bootstrapper::Iterate(ObjectVisitor* v) {
v->Synchronize("NativesCache");
extensions_cache.Iterate(v);
v->Synchronize("Extensions");
-PendingFixups::Iterate(v);
-v->Synchronize("PendingFixups");
-}
-
-// While setting up the environment, we collect code positions that
-// need to be patched before we can run any code in the environment.
-void Bootstrapper::AddFixup(Code* code, MacroAssembler* masm) {
-PendingFixups::Add(code, masm);
}
@@ -841,11 +723,11 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
#ifdef DEBUG
LookupResult lookup;
result->LocalLookup(Heap::callee_symbol(), &lookup);
-ASSERT(lookup.IsValid() && (lookup.type() == FIELD));
ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::arguments_callee_index);
result->LocalLookup(Heap::length_symbol(), &lookup);
-ASSERT(lookup.IsValid() && (lookup.type() == FIELD));
ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::arguments_length_index);
ASSERT(result->map()->inobject_properties() > Heap::arguments_callee_index);
@@ -942,7 +824,8 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
ASSERT(source->IsAsciiRepresentation());
Handle<String> script_name = Factory::NewStringFromUtf8(name);
boilerplate =
-Compiler::Compile(source, script_name, 0, 0, extension, NULL);
Compiler::Compile(source, script_name, 0, 0, extension, NULL,
Handle<String>::null());
if (boilerplate.is_null()) return false;
cache->Add(name, boilerplate);
}
@@ -968,8 +851,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
Handle<Object> result =
Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
if (has_pending_exception) return false;
-return PendingFixups::Process(
-Handle<JSBuiltinsObject>(Top::context()->builtins()));
return true;
}
@@ -989,7 +871,6 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSFunction, "ToInteger", to_integer_fun);
INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
-INSTALL_NATIVE(JSFunction, "ToBoolean", to_boolean_fun);
INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
@@ -1176,6 +1057,10 @@ bool Genesis::InstallNatives() {
i < Natives::GetBuiltinsCount();
i++) {
if (!CompileBuiltin(i)) return false;
// TODO(ager): We really only need to install the JS builtin
// functions on the builtins object after compiling and running
// runtime.js.
if (!InstallJSBuiltins(builtins)) return false;
}

// Setup natives with lazy loading.
@@ -1377,6 +1262,22 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
}
bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
HandleScope scope;
for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
Handle<String> name = Factory::LookupAsciiSymbol(Builtins::GetName(id));
Handle<JSFunction> function
= Handle<JSFunction>(JSFunction::cast(builtins->GetProperty(*name)));
builtins->set_javascript_builtin(id, *function);
Handle<SharedFunctionInfo> shared
= Handle<SharedFunctionInfo>(function->shared());
if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
}
return true;
}
bool Genesis::ConfigureGlobalObjects(
v8::Handle<v8::ObjectTemplate> global_proxy_template) {
Handle<JSObject> global_proxy(
@@ -1451,7 +1352,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
LookupResult result;
to->LocalLookup(descs->GetKey(i), &result);
// If the property is already there we skip it
-if (result.IsValid()) continue;
if (result.IsProperty()) continue;
HandleScope inner;
Handle<DescriptorArray> inst_descs =
Handle<DescriptorArray>(to->map()->instance_descriptors());
@@ -1488,7 +1389,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// If the property is already there we skip it.
LookupResult result;
to->LocalLookup(String::cast(raw_key), &result);
-if (result.IsValid()) continue;
if (result.IsProperty()) continue;
// Set the property.
Handle<String> key = Handle<String>(String::cast(raw_key));
Handle<Object> value = Handle<Object>(properties->ValueAt(i));
@@ -1572,25 +1473,33 @@ void Genesis::AddSpecialFunction(Handle<JSObject> prototype,
void Genesis::BuildSpecialFunctionTable() {
HandleScope scope;
Handle<JSObject> global = Handle<JSObject>(global_context()->global());
-// Add special versions for Array.prototype.pop and push.
// Add special versions for some Array.prototype functions.
Handle<JSFunction> function =
Handle<JSFunction>(
JSFunction::cast(global->GetProperty(Heap::Array_symbol())));
Handle<JSObject> visible_prototype =
Handle<JSObject>(JSObject::cast(function->prototype()));
-// Remember to put push and pop on the hidden prototype if it's there.
-Handle<JSObject> push_and_pop_prototype;
// Remember to put those specializations on the hidden prototype if present.
Handle<JSObject> special_prototype;
Handle<Object> superproto(visible_prototype->GetPrototype());
if (superproto->IsJSObject() &&
JSObject::cast(*superproto)->map()->is_hidden_prototype()) {
-push_and_pop_prototype = Handle<JSObject>::cast(superproto);
special_prototype = Handle<JSObject>::cast(superproto);
} else {
-push_and_pop_prototype = visible_prototype;
special_prototype = visible_prototype;
}
-AddSpecialFunction(push_and_pop_prototype, "pop",
AddSpecialFunction(special_prototype, "pop",
Handle<Code>(Builtins::builtin(Builtins::ArrayPop)));
-AddSpecialFunction(push_and_pop_prototype, "push",
AddSpecialFunction(special_prototype, "push",
Handle<Code>(Builtins::builtin(Builtins::ArrayPush)));
AddSpecialFunction(special_prototype, "shift",
Handle<Code>(Builtins::builtin(Builtins::ArrayShift)));
AddSpecialFunction(special_prototype, "unshift",
Handle<Code>(Builtins::builtin(Builtins::ArrayUnshift)));
AddSpecialFunction(special_prototype, "slice",
Handle<Code>(Builtins::builtin(Builtins::ArraySlice)));
AddSpecialFunction(special_prototype, "splice",
Handle<Code>(Builtins::builtin(Builtins::ArraySplice)));
}

3
deps/v8/src/bootstrapper.h

@@ -59,9 +59,6 @@ class Bootstrapper : public AllStatic {
Handle<JSFunction>* handle);
static void NativesCacheAdd(Vector<const char> name, Handle<JSFunction> fun);
-// Append code that needs fixup at the end of boot strapping.
-static void AddFixup(Code* code, MacroAssembler* masm);
// Tells whether bootstrapping is active.
static bool IsActive();

499
deps/v8/src/builtins.cc

@@ -168,28 +168,6 @@ static inline bool CalledAsConstructor() {
// ----------------------------------------------------------------------------
-Handle<Code> Builtins::GetCode(JavaScript id, bool* resolved) {
-Code* code = Builtins::builtin(Builtins::Illegal);
-*resolved = false;
-if (Top::context() != NULL) {
-Object* object = Top::builtins()->javascript_builtin(id);
-if (object->IsJSFunction()) {
-Handle<SharedFunctionInfo> shared(JSFunction::cast(object)->shared());
-// Make sure the number of parameters match the formal parameter count.
-ASSERT(shared->formal_parameter_count() ==
-Builtins::GetArgumentsCount(id));
-if (EnsureCompiled(shared, CLEAR_EXCEPTION)) {
-code = shared->code();
-*resolved = true;
-}
-}
-}
-return Handle<Code>(code);
-}
BUILTIN(Illegal) {
UNREACHABLE();
return Heap::undefined_value(); // Make compiler happy.
@@ -268,19 +246,19 @@ BUILTIN(ArrayPush) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
-// Make sure we have space for the elements.
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
if (to_add == 0) {
return Smi::FromInt(len);
}
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
-// Set new length.
-int new_length = len + args.length() - 1;
int new_length = len + to_add;
FixedArray* elms = FixedArray::cast(array->elements());
-if (new_length <= elms->length()) {
-// Backing storage has extra space for the provided values.
-for (int index = 0; index < args.length() - 1; index++) {
-elms->set(index + len, args[index+1]);
-}
-} else {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
@@ -291,16 +269,21 @@ BUILTIN(ArrayPush) {
WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc);
// Fill out the new array with old elements.
for (int i = 0; i < len; i++) new_elms->set(i, elms->get(i), mode);
-// Add the provided values.
-for (int index = 0; index < args.length() - 1; index++) {
-new_elms->set(index + len, args[index+1], mode);
-}
-// Set the new backing storage.
-array->set_elements(new_elms);
elms = new_elms;
array->set_elements(elms);
}

AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
// Add the provided values.
for (int index = 0; index < to_add; index++) {
elms->set(index + len, args[index + 1], mode);
}

// Set the length.
array->set_length(Smi::FromInt(new_length));
-return array->length();
return Smi::FromInt(new_length);
}
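Note: the rewritten ArrayPush grows the backing store at most once per call and then writes in place; the `new_length + (new_length >> 1) + 16` rule leaves roughly 50% headroom plus a small constant, so a run of pushes reallocates only O(log n) times. A standalone sketch of that growth schedule (illustrative, not a benchmark of the patch):

    #include <cstdio>

    // The capacity rule used above: requested length, plus half again,
    // plus 16 slots of fixed slack.
    static int NewCapacity(int new_length) {
      return new_length + (new_length >> 1) + 16;
    }

    int main() {
      // Push one element at a time; reallocate only when the requested
      // length exceeds the current capacity.
      int capacity = 0;
      int reallocations = 0;
      for (int len = 1; len <= 100000; len++) {
        if (len > capacity) {
          capacity = NewCapacity(len);
          reallocations++;
        }
      }
      printf("100000 pushes -> %d reallocations\n", reallocations);
      return 0;
    }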
@@ -335,6 +318,355 @@ BUILTIN(ArrayPop) {
}
static Object* GetElementToMove(uint32_t index,
FixedArray* elms,
JSObject* prototype) {
Object* e = elms->get(index);
if (e->IsTheHole() && prototype->HasElement(index)) {
e = prototype->GetElement(index);
}
return e;
}
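Note: GetElementToMove is what keeps these C++ fast paths observably equivalent to the JavaScript versions for sparse arrays: a hole in the fast-elements store is transparent, so the value visible through Array.prototype (if any) must be moved instead. A schematic of that rule with the V8 types replaced by plain stand-ins (hypothetical names, illustration only):

    #include <map>

    const int kTheHole = -1;  // stand-in for Heap::the_hole_value()

    // Stand-in for the Array.prototype object consulted above.
    struct Prototype {
      std::map<unsigned, int> elements;
      bool HasElement(unsigned i) const { return elements.count(i) != 0; }
      int GetElement(unsigned i) const { return elements.at(i); }
    };

    // Mirrors GetElementToMove: a hole defers to the prototype chain.
    int GetElementToMove(unsigned index, const int* elms, const Prototype& proto) {
      int e = elms[index];
      if (e == kTheHole && proto.HasElement(index)) {
        e = proto.GetElement(index);
      }
      return e;
    }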
BUILTIN(ArrayShift) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return Heap::undefined_value();
// Fetch the prototype.
JSFunction* array_function =
Top::context()->global_context()->array_function();
JSObject* prototype = JSObject::cast(array_function->prototype());
FixedArray* elms = FixedArray::cast(array->elements());
// Get first element
Object* first = elms->get(0);
if (first->IsTheHole()) {
first = prototype->GetElement(0);
}
// Shift the elements.
for (int i = 0; i < len - 1; i++) {
elms->set(i, GetElementToMove(i + 1, elms, prototype));
}
elms->set(len - 1, Heap::the_hole_value());
// Set the length.
array->set_length(Smi::FromInt(len - 1));
return first;
}
BUILTIN(ArrayUnshift) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
// Note that we cannot quit early if to_add == 0 as
// values should be lifted from prototype into
// the array.
int new_length = len + to_add;
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
FixedArray* elms = FixedArray::cast(array->elements());
// Fetch the prototype.
JSFunction* array_function =
Top::context()->global_context()->array_function();
JSObject* prototype = JSObject::cast(array_function->prototype());
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
if (obj->IsFailure()) return obj;
AssertNoAllocation no_gc;
FixedArray* new_elms = FixedArray::cast(obj);
WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc);
// Fill out the new array with old elements.
for (int i = 0; i < len; i++)
new_elms->set(to_add + i,
GetElementToMove(i, elms, prototype),
mode);
elms = new_elms;
array->set_elements(elms);
} else {
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
// Move elements to the right
for (int i = 0; i < len; i++) {
elms->set(new_length - i - 1,
GetElementToMove(len - i - 1, elms, prototype),
mode);
}
}
// Add the provided values.
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int i = 0; i < to_add; i++) {
elms->set(i, args[i + 1], mode);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
return Smi::FromInt(new_length);
}
static Object* CallJsBuiltin(const char* name,
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
HandleScope handleScope;
Handle<Object> js_builtin =
GetProperty(Handle<JSObject>(Top::global_context()->builtins()),
name);
ASSERT(js_builtin->IsJSFunction());
Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
Vector<Object**> argv(Vector<Object**>::New(args.length() - 1));
int n_args = args.length() - 1;
for (int i = 0; i < n_args; i++) {
argv[i] = &args[i + 1];
}
bool pending_exception = false;
Handle<Object> result = Execution::Call(function,
args.receiver(),
n_args,
argv.start(),
&pending_exception);
if (pending_exception) return Failure::Exception();
return *result;
}
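Note: CallJsBuiltin is the escape hatch for the C++ array builtins below: the fast paths only handle the common shapes (smi indices, fast elements), and anything else is rerouted to the unmodified JavaScript implementation, so observable semantics do not depend on which path ran. The dispatch shape, reduced to a sketch (hypothetical names):

    #include <cstdio>

    // Schematic of the fast-path/fallback split used by ArraySlice and
    // ArraySplice: smi arguments stay in C++, undefined takes a default,
    // anything else (doubles, objects with valueOf, ...) falls back.
    enum class Kind { kSmi, kUndefined, kOther };
    struct Arg { Kind kind; int smi; };

    // Returns false when the caller must delegate to the JS builtin.
    bool TryFastIndex(Arg a, int default_value, int* out) {
      if (a.kind == Kind::kSmi) { *out = a.smi; return true; }
      if (a.kind == Kind::kUndefined) { *out = default_value; return true; }
      return false;
    }

    int main() {
      int start = 0;
      Arg a{Kind::kOther, 0};
      printf("%s\n", TryFastIndex(a, 0, &start) ? "fast path"
                                                : "fallback to JS builtin");
      return 0;
    }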
BUILTIN(ArraySlice) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
int n_arguments = args.length() - 1;
// Note carefully chosen defaults---if argument is missing,
// it's undefined which gets converted to 0 for relativeStart
// and to len for relativeEnd.
int relativeStart = 0;
int relativeEnd = len;
if (n_arguments > 0) {
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relativeStart = Smi::cast(arg1)->value();
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin("ArraySlice", args);
}
if (n_arguments > 1) {
Object* arg2 = args[2];
if (arg2->IsSmi()) {
relativeEnd = Smi::cast(arg2)->value();
} else if (!arg2->IsUndefined()) {
return CallJsBuiltin("ArraySlice", args);
}
}
}
// ECMA-262, 3rd Edition, Section 15.4.4.10, step 6.
int k = (relativeStart < 0) ? Max(len + relativeStart, 0)
: Min(relativeStart, len);
// ECMA-262, 3rd Edition, Section 15.4.4.10, step 8.
int final = (relativeEnd < 0) ? Max(len + relativeEnd, 0)
: Min(relativeEnd, len);
// Calculate the length of result array.
int result_len = final - k;
if (result_len < 0) {
result_len = 0;
}
JSFunction* array_function =
Top::context()->global_context()->array_function();
Object* result = Heap::AllocateJSObject(array_function);
if (result->IsFailure()) return result;
JSArray* result_array = JSArray::cast(result);
result = Heap::AllocateFixedArrayWithHoles(result_len);
if (result->IsFailure()) return result;
FixedArray* result_elms = FixedArray::cast(result);
FixedArray* elms = FixedArray::cast(array->elements());
// Fetch the prototype.
JSObject* prototype = JSObject::cast(array_function->prototype());
AssertNoAllocation no_gc;
WriteBarrierMode mode = result_elms->GetWriteBarrierMode(no_gc);
// Fill newly created array.
for (int i = 0; i < result_len; i++) {
result_elms->set(i,
GetElementToMove(k + i, elms, prototype),
mode);
}
// Set elements.
result_array->set_elements(result_elms);
// Set the length.
result_array->set_length(Smi::FromInt(result_len));
return result_array;
}
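Note: the start/end clamping above is exactly ES3 Section 15.4.4.10 steps 6 and 8: negative indices count back from the end, and both endpoints clamp to [0, len]. A quick standalone check of the formula (illustrative values):

    #include <algorithm>
    #include <cassert>

    // Steps 6 and 8 of ES3 Section 15.4.4.10, as implemented above.
    static int ClampSliceIndex(int relative, int len) {
      return (relative < 0) ? std::max(len + relative, 0)
                            : std::min(relative, len);
    }

    int main() {
      const int len = 5;
      assert(ClampSliceIndex(-2, len) == 3);  // slice(-2) starts at index 3
      assert(ClampSliceIndex(7, len) == 5);   // an end past the length clamps to len
      assert(ClampSliceIndex(-9, len) == 0);  // a far-negative start clamps to 0
      return 0;
    }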
BUILTIN(ArraySplice) {
JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
int n_arguments = args.length() - 1;
// SpiderMonkey and JSC return undefined in the case where no
// arguments are given instead of using the implicit undefined
// arguments. This does not follow ECMA-262, but we do the same for
// compatibility.
// TraceMonkey follows ECMA-262 though.
if (n_arguments == 0) {
return Heap::undefined_value();
}
int relativeStart = 0;
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relativeStart = Smi::cast(arg1)->value();
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin("ArraySplice", args);
}
int actualStart = (relativeStart < 0) ? Max(len + relativeStart, 0)
: Min(relativeStart, len);
// SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
// given differently from when an undefined delete count is given.
// This does not follow ECMA-262, but we do the same for
// compatibility.
int deleteCount = len;
if (n_arguments > 1) {
Object* arg2 = args[2];
if (arg2->IsSmi()) {
deleteCount = Smi::cast(arg2)->value();
} else {
return CallJsBuiltin("ArraySplice", args);
}
}
int actualDeleteCount = Min(Max(deleteCount, 0), len - actualStart);
JSFunction* array_function =
Top::context()->global_context()->array_function();
// Allocate result array.
Object* result = Heap::AllocateJSObject(array_function);
if (result->IsFailure()) return result;
JSArray* result_array = JSArray::cast(result);
result = Heap::AllocateFixedArrayWithHoles(actualDeleteCount);
if (result->IsFailure()) return result;
FixedArray* result_elms = FixedArray::cast(result);
FixedArray* elms = FixedArray::cast(array->elements());
// Fetch the prototype.
JSObject* prototype = JSObject::cast(array_function->prototype());
AssertNoAllocation no_gc;
WriteBarrierMode mode = result_elms->GetWriteBarrierMode(no_gc);
// Fill newly created array.
for (int k = 0; k < actualDeleteCount; k++) {
result_elms->set(k,
GetElementToMove(actualStart + k, elms, prototype),
mode);
}
// Set elements.
result_array->set_elements(result_elms);
// Set the length.
result_array->set_length(Smi::FromInt(actualDeleteCount));
int itemCount = (n_arguments > 1) ? (n_arguments - 2) : 0;
int new_length = len - actualDeleteCount + itemCount;
mode = elms->GetWriteBarrierMode(no_gc);
if (itemCount < actualDeleteCount) {
// Shrink the array.
for (int k = actualStart; k < (len - actualDeleteCount); k++) {
elms->set(k + itemCount,
GetElementToMove(k + actualDeleteCount, elms, prototype),
mode);
}
for (int k = len; k > new_length; k--) {
elms->set(k - 1, Heap::the_hole_value());
}
} else if (itemCount > actualDeleteCount) {
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT((itemCount - actualDeleteCount) <= (Smi::kMaxValue - len));
FixedArray* source_elms = elms;
// Check if array need to grow.
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
if (obj->IsFailure()) return obj;
FixedArray* new_elms = FixedArray::cast(obj);
mode = new_elms->GetWriteBarrierMode(no_gc);
// Copy the part before actualStart as is.
for (int k = 0; k < actualStart; k++) {
new_elms->set(k, elms->get(k), mode);
}
source_elms = elms;
elms = new_elms;
array->set_elements(elms);
}
for (int k = len - actualDeleteCount; k > actualStart; k--) {
elms->set(k + itemCount - 1,
GetElementToMove(k + actualDeleteCount - 1,
source_elms,
prototype),
mode);
}
}
for (int k = actualStart; k < actualStart + itemCount; k++) {
elms->set(k, args[3 + k - actualStart], mode);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
return result_array;
}
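Note: the splice index arithmetic composes the same clamp with a cap on the delete count. Extracted into a standalone check (illustrative values):

    #include <algorithm>
    #include <cassert>

    // The index arithmetic from ArraySplice above.
    static void SpliceBounds(int relative_start, int delete_count, int len,
                             int* actual_start, int* actual_delete_count) {
      *actual_start = (relative_start < 0) ? std::max(len + relative_start, 0)
                                           : std::min(relative_start, len);
      *actual_delete_count =
          std::min(std::max(delete_count, 0), len - *actual_start);
    }

    int main() {
      int start, del;
      // [1,2,3,4,5].splice(-2, 10): the start counts from the end, and
      // the delete count is capped at the number of trailing elements.
      SpliceBounds(-2, 10, 5, &start, &del);
      assert(start == 3 && del == 2);
      return 0;
    }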
// -----------------------------------------------------------------------------
//
@@ -474,6 +806,76 @@ BUILTIN(HandleApiCallConstruct) {
}
#ifdef DEBUG
static void VerifyTypeCheck(Handle<JSObject> object,
Handle<JSFunction> function) {
FunctionTemplateInfo* info =
FunctionTemplateInfo::cast(function->shared()->function_data());
if (info->signature()->IsUndefined()) return;
SignatureInfo* signature = SignatureInfo::cast(info->signature());
Object* receiver_type = signature->receiver();
if (receiver_type->IsUndefined()) return;
FunctionTemplateInfo* type = FunctionTemplateInfo::cast(receiver_type);
ASSERT(object->IsInstanceOf(type));
}
#endif
BUILTIN(FastHandleApiCall) {
ASSERT(!CalledAsConstructor());
const bool is_construct = false;
// We expect four more arguments: function, callback, call data, and holder.
const int args_length = args.length() - 4;
ASSERT(args_length >= 0);
Handle<JSFunction> function = args.at<JSFunction>(args_length);
Object* callback_obj = args[args_length + 1];
Handle<Object> data_handle = args.at<Object>(args_length + 2);
Handle<JSObject> checked_holder = args.at<JSObject>(args_length + 3);
#ifdef DEBUG
VerifyTypeCheck(checked_holder, function);
#endif
v8::Local<v8::Object> holder = v8::Utils::ToLocal(checked_holder);
v8::Local<v8::Function> callee = v8::Utils::ToLocal(function);
v8::InvocationCallback callback =
v8::ToCData<v8::InvocationCallback>(callback_obj);
v8::Local<v8::Value> data = v8::Utils::ToLocal(data_handle);
v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
data,
holder,
callee,
is_construct,
reinterpret_cast<void**>(&args[0] - 1),
args_length - 1);
HandleScope scope;
Object* result;
v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
#ifdef ENABLE_LOGGING_AND_PROFILING
state.set_external_callback(v8::ToCData<Address>(callback_obj));
#endif
value = callback(new_args);
}
if (value.IsEmpty()) {
result = Heap::undefined_value();
} else {
result = *reinterpret_cast<Object**>(*value);
}
RETURN_IF_SCHEDULED_EXCEPTION();
return result;
}
// Helper function to handle calls to non-function objects created through the
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
@@ -657,6 +1059,10 @@ static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
KeyedLoadIC::GeneratePreMonomorphic(masm);
}
static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
KeyedLoadIC::GenerateIndexedInterceptor(masm);
}
static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
StoreIC::GenerateInitialize(masm);

@@ -668,15 +1074,16 @@ static void Generate_StoreIC_Miss(MacroAssembler* masm) {
}
-static void Generate_StoreIC_ExtendStorage(MacroAssembler* masm) {
-StoreIC::GenerateExtendStorage(masm);
-}
static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
StoreIC::GenerateMegamorphic(masm);
}
static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) {
StoreIC::GenerateArrayLength(masm);
}
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
KeyedStoreIC::GenerateGeneric(masm);
}
@@ -720,11 +1127,6 @@ static void Generate_KeyedStoreIC_ExternalFloatArray(MacroAssembler* masm) {
}
-static void Generate_KeyedStoreIC_ExtendStorage(MacroAssembler* masm) {
-KeyedStoreIC::GenerateExtendStorage(masm);
-}
static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
KeyedStoreIC::GenerateMiss(masm);
}
@@ -869,9 +1271,6 @@ void Builtins::Setup(bool create_heap_objects) {
v8::internal::V8::FatalProcessOutOfMemory("CreateCode");
}
}
-// Add any unresolved jumps or calls to the fixup list in the
-// bootstrapper.
-Bootstrapper::AddFixup(Code::cast(code), &masm);
// Log the event and add the code to the builtins array.
LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), functions[i].s_name));

10
deps/v8/src/builtins.h

@@ -48,8 +48,13 @@ enum BuiltinExtraArguments {
\
V(ArrayPush, NO_EXTRA_ARGUMENTS) \
V(ArrayPop, NO_EXTRA_ARGUMENTS) \
V(ArrayShift, NO_EXTRA_ARGUMENTS) \
V(ArrayUnshift, NO_EXTRA_ARGUMENTS) \
V(ArraySlice, NO_EXTRA_ARGUMENTS) \
V(ArraySplice, NO_EXTRA_ARGUMENTS) \
\
V(HandleApiCall, NEEDS_CALLED_FUNCTION) \
V(FastHandleApiCall, NO_EXTRA_ARGUMENTS) \
V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \
V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS)
@@ -69,9 +74,6 @@ enum BuiltinExtraArguments {
V(StoreIC_Miss, BUILTIN, UNINITIALIZED) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED) \
\
-V(StoreIC_ExtendStorage, BUILTIN, UNINITIALIZED) \
-V(KeyedStoreIC_ExtendStorage, BUILTIN, UNINITIALIZED) \
-\
V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED) \
V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC) \
V(LoadIC_Normal, LOAD_IC, MONOMORPHIC) \
@@ -91,8 +93,10 @@ enum BuiltinExtraArguments {
V(KeyedLoadIC_ExternalIntArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalUnsignedIntArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalFloatArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC) \
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED) \
V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \

4
deps/v8/src/checks.h

@@ -125,7 +125,9 @@ static inline void CheckEqualsHelper(const char* file,
const char* expected,
const char* value_source,
const char* value) {
-if (strcmp(expected, value) != 0) {
if ((expected == NULL && value != NULL) ||
(expected != NULL && value == NULL) ||
(expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
V8_Fatal(file, line,
"CHECK_EQ(%s, %s) failed\n# Expected: %s\n# Found: %s",
expected_source, value_source, expected, value);
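Note: the old form passed NULL straight into strcmp, which is undefined behavior when a CHECK_EQ operand is NULL; the new condition treats two NULLs as equal and a NULL/non-NULL pair as a failure. The predicate in isolation (standalone sketch):

    #include <cassert>
    #include <cstring>

    // The NULL-safe inequality test now used by CHECK_EQ on C strings:
    // two NULLs compare equal; NULL never equals a real string.
    static bool NotEqual(const char* expected, const char* value) {
      return (expected == NULL && value != NULL) ||
             (expected != NULL && value == NULL) ||
             (expected != NULL && value != NULL && strcmp(expected, value) != 0);
    }

    int main() {
      assert(!NotEqual(NULL, NULL));    // both NULL: equal
      assert(NotEqual(NULL, "x"));      // mixed: not equal
      assert(!NotEqual("abc", "abc"));  // same contents: equal
      return 0;
    }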

16
deps/v8/src/code-stubs.cc

@@ -31,6 +31,7 @@
#include "code-stubs.h"
#include "factory.h"
#include "macro-assembler.h"
#include "oprofile-agent.h"

namespace v8 {
namespace internal {
@ -60,8 +61,12 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) { void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->set_major_key(MajorKey()); code->set_major_key(MajorKey());
// Add unresolved entries in the code to the fixup list. #ifdef ENABLE_OPROFILE_AGENT
Bootstrapper::AddFixup(code, masm); // Register the generated stub with the OPROFILE agent.
OProfileAgent::CreateNativeCodeRegion(GetName(),
code->instruction_start(),
code->instruction_size());
#endif
LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName())); LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
Counters::total_stubs_code_size.Increment(code->instruction_size()); Counters::total_stubs_code_size.Increment(code->instruction_size());
@ -149,13 +154,16 @@ Object* CodeStub::TryGetCode() {
} }
const char* CodeStub::MajorName(CodeStub::Major major_key) { const char* CodeStub::MajorName(CodeStub::Major major_key,
bool allow_unknown_keys) {
switch (major_key) { switch (major_key) {
#define DEF_CASE(name) case name: return #name; #define DEF_CASE(name) case name: return #name;
CODE_STUB_LIST(DEF_CASE) CODE_STUB_LIST(DEF_CASE)
#undef DEF_CASE #undef DEF_CASE
default: default:
UNREACHABLE(); if (!allow_unknown_keys) {
UNREACHABLE();
}
return NULL; return NULL;
} }
} }
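
MajorName now takes an allow_unknown_keys flag so that tolerant callers (for example, tooling that meets a stub key not present in CODE_STUB_LIST) can receive NULL instead of hitting UNREACHABLE. A sketch of the two calling styles, assuming code-stubs.h is included:

    // Strict caller: preserves the old aborting behaviour on unknown keys.
    const char* strict_name = CodeStub::MajorName(key, false);

    // Tolerant caller: must be prepared for a NULL result.
    const char* name = CodeStub::MajorName(key, true);
    if (name == NULL) name = "<unknown stub>";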

5
deps/v8/src/code-stubs.h

@@ -55,6 +55,7 @@ namespace internal {
   V(CounterOp)       \
   V(ArgumentsAccess) \
   V(RegExpExec)      \
+  V(NumberToString)  \
   V(CEntry)          \
   V(JSEntry)         \
   V(DebuggerStatement)
@@ -100,7 +101,7 @@ class CodeStub BASE_EMBEDDED {
   static int MinorKeyFromKey(uint32_t key) {
     return MinorKeyBits::decode(key);
   };
-  static const char* MajorName(Major major_key);
+  static const char* MajorName(Major major_key, bool allow_unknown_keys);
 
   virtual ~CodeStub() {}
@@ -138,7 +139,7 @@ class CodeStub BASE_EMBEDDED {
   virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
 
   // Returns a name for logging/debugging purposes.
-  virtual const char* GetName() { return MajorName(MajorKey()); }
+  virtual const char* GetName() { return MajorName(MajorKey(), false); }
 
 #ifdef DEBUG
   virtual void Print() { PrintF("%s\n", GetName()); }

41
deps/v8/src/codegen-inl.h

@@ -30,6 +30,7 @@
 #define V8_CODEGEN_INL_H_
 
 #include "codegen.h"
+#include "compiler.h"
 #include "register-allocator-inl.h"
 
 #if V8_TARGET_ARCH_IA32
@@ -38,6 +39,8 @@
 #include "x64/codegen-x64-inl.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/codegen-arm-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/codegen-mips-inl.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -46,42 +49,8 @@
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm_)
-
-// -----------------------------------------------------------------------------
-// Support for "structured" code comments.
-//
-// By selecting matching brackets in disassembler output,
-// code segments can be identified more easily.
-
-#ifdef DEBUG
-
-class Comment BASE_EMBEDDED {
- public:
-  Comment(MacroAssembler* masm, const char* msg) : masm_(masm), msg_(msg) {
-    __ RecordComment(msg);
-  }
-
-  ~Comment() {
-    if (msg_[0] == '[') __ RecordComment("]");
-  }
-
- private:
-  MacroAssembler* masm_;
-  const char* msg_;
-};
-
-#else
-
-class Comment BASE_EMBEDDED {
- public:
-  Comment(MacroAssembler*, const char*) {}
-};
-
-#endif  // DEBUG
-
-#undef __
+Handle<Script> CodeGenerator::script() { return info_->script(); }
+
+bool CodeGenerator::is_eval() { return info_->is_eval(); }
 
 } }  // namespace v8::internal

75
deps/v8/src/codegen.cc

@@ -31,6 +31,7 @@
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
+#include "liveedit.h"
 #include "oprofile-agent.h"
 #include "prettyprinter.h"
 #include "register-allocator-inl.h"
@@ -42,6 +43,24 @@
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm_)
+
+#ifdef DEBUG
+
+Comment::Comment(MacroAssembler* masm, const char* msg)
+    : masm_(masm), msg_(msg) {
+  __ RecordComment(msg);
+}
+
+Comment::~Comment() {
+  if (msg_[0] == '[') __ RecordComment("]");
+}
+
+#endif  // DEBUG
+
+#undef __
+
 CodeGenerator* CodeGeneratorScope::top_ = NULL;
@@ -126,7 +145,7 @@ void CodeGenerator::DeleteFrame() {
 }
 
-void CodeGenerator::MakeCodePrologue(FunctionLiteral* fun) {
+void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
 #ifdef DEBUG
   bool print_source = false;
   bool print_ast = false;
@@ -147,60 +166,61 @@ void CodeGenerator::MakeCodePrologue(FunctionLiteral* fun) {
   if (FLAG_trace_codegen || print_source || print_ast) {
     PrintF("*** Generate code for %s function: ", ftype);
-    fun->name()->ShortPrint();
+    info->function()->name()->ShortPrint();
     PrintF(" ***\n");
   }
 
   if (print_source) {
-    PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(fun));
+    PrintF("--- Source from AST ---\n%s\n",
+           PrettyPrinter().PrintProgram(info->function()));
   }
 
   if (print_ast) {
-    PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(fun));
+    PrintF("--- AST ---\n%s\n",
+           AstPrinter().PrintProgram(info->function()));
   }
 
   if (print_json_ast) {
     JsonAstBuilder builder;
-    PrintF("%s", builder.BuildProgram(fun));
+    PrintF("%s", builder.BuildProgram(info->function()));
   }
 #endif  // DEBUG
 }
 
-Handle<Code> CodeGenerator::MakeCodeEpilogue(FunctionLiteral* fun,
-                                             MacroAssembler* masm,
+Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
                                              Code::Flags flags,
-                                             Handle<Script> script) {
+                                             CompilationInfo* info) {
   // Allocate and install the code.
   CodeDesc desc;
   masm->GetCode(&desc);
-  ZoneScopeInfo sinfo(fun->scope());
+  ZoneScopeInfo sinfo(info->scope());
   Handle<Code> code =
       Factory::NewCode(desc, &sinfo, flags, masm->CodeObject());
 
-  // Add unresolved entries in the code to the fixup list.
-  Bootstrapper::AddFixup(*code, masm);
-
 #ifdef ENABLE_DISASSEMBLER
   bool print_code = Bootstrapper::IsActive()
       ? FLAG_print_builtin_code
      : FLAG_print_code;
   if (print_code) {
     // Print the source code if available.
+    Handle<Script> script = info->script();
+    FunctionLiteral* function = info->function();
     if (!script->IsUndefined() && !script->source()->IsUndefined()) {
       PrintF("--- Raw source ---\n");
       StringInputBuffer stream(String::cast(script->source()));
-      stream.Seek(fun->start_position());
+      stream.Seek(function->start_position());
       // fun->end_position() points to the last character in the stream. We
       // need to compensate by adding one to calculate the length.
-      int source_len = fun->end_position() - fun->start_position() + 1;
+      int source_len =
+          function->end_position() - function->start_position() + 1;
       for (int i = 0; i < source_len; i++) {
         if (stream.has_more()) PrintF("%c", stream.GetNext());
       }
       PrintF("\n\n");
     }
     PrintF("--- Code ---\n");
-    code->Disassemble(*fun->name()->ToCString());
+    code->Disassemble(*function->name()->ToCString());
   }
 #endif  // ENABLE_DISASSEMBLER
@@ -214,21 +234,21 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(FunctionLiteral* fun,
 // Generate the code.  Takes a function literal, generates code for it, assemble
 // all the pieces into a Code object. This function is only to be called by
 // the compiler.cc code.
-Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
-                                     Handle<Script> script,
-                                     bool is_eval,
-                                     CompilationInfo* info) {
+Handle<Code> CodeGenerator::MakeCode(CompilationInfo* info) {
+  LiveEditFunctionTracker live_edit_tracker(info->function());
+  Handle<Script> script = info->script();
   if (!script->IsUndefined() && !script->source()->IsUndefined()) {
     int len = String::cast(script->source())->length();
     Counters::total_old_codegen_source_size.Increment(len);
   }
-  MakeCodePrologue(fun);
+  MakeCodePrologue(info);
   // Generate code.
   const int kInitialBufferSize = 4 * KB;
   MacroAssembler masm(NULL, kInitialBufferSize);
-  CodeGenerator cgen(&masm, script, is_eval);
+  CodeGenerator cgen(&masm);
   CodeGeneratorScope scope(&cgen);
-  cgen.Generate(fun, PRIMARY, info);
+  live_edit_tracker.RecordFunctionScope(info->function()->scope());
+  cgen.Generate(info, PRIMARY);
   if (cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());
     return Handle<Code>::null();
@@ -236,7 +256,9 @@ Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
 
   InLoopFlag in_loop = (cgen.loop_nesting() != 0) ? IN_LOOP : NOT_IN_LOOP;
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
-  return MakeCodeEpilogue(fun, cgen.masm(), flags, script);
+  Handle<Code> result = MakeCodeEpilogue(cgen.masm(), flags, info);
+  live_edit_tracker.RecordFunctionCode(result);
+  return result;
 }
@@ -355,6 +377,7 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
   {&CodeGenerator::GenerateSubString, "_SubString"},
   {&CodeGenerator::GenerateStringCompare, "_StringCompare"},
   {&CodeGenerator::GenerateRegExpExec, "_RegExpExec"},
+  {&CodeGenerator::GenerateNumberToString, "_NumberToString"},
 };
@@ -506,10 +529,4 @@ void ApiGetterEntryStub::SetCustomCache(Code* value) {
 }
 
-void DebuggerStatementStub::Generate(MacroAssembler* masm) {
-  Runtime::Function* f = Runtime::FunctionForId(Runtime::kDebugBreak);
-  masm->TailCallRuntime(ExternalReference(f), 0, f->result_size);
-}
-
 } }  // namespace v8::internal
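
The Comment helper that used to live in codegen-inl.h is now declared in codegen.h (see below) and defined here, so any file including codegen.h can bracket emitted code without pulling in the inline header. Typical usage is a scoped RAII guard; a sketch, where masm_ stands for whatever MacroAssembler the caller holds:

    void EmitLocals(MacroAssembler* masm_) {
      Comment cmnt(masm_, "[ Allocate locals");
      // ... emit instructions here; when cmnt leaves scope, the leading
      // '[' causes a matching ']' comment to be recorded, so brackets can
      // be paired up in disassembler output.
    }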

41
deps/v8/src/codegen.h

@@ -31,6 +31,7 @@
 #include "ast.h"
 #include "code-stubs.h"
 #include "runtime.h"
+#include "number-info.h"
 
 // Include the declaration of the architecture defined class CodeGenerator.
 // The contract to the shared code is that the the CodeGenerator is a subclass
@@ -86,6 +87,8 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
 #include "x64/codegen-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/codegen-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/codegen-mips.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -96,6 +99,29 @@ namespace v8 {
 namespace internal {
 
+// Support for "structured" code comments.
+#ifdef DEBUG
+
+class Comment BASE_EMBEDDED {
+ public:
+  Comment(MacroAssembler* masm, const char* msg);
+  ~Comment();
+
+ private:
+  MacroAssembler* masm_;
+  const char* msg_;
+};
+
+#else
+
+class Comment BASE_EMBEDDED {
+ public:
+  Comment(MacroAssembler*, const char*) {}
+};
+
+#endif  // DEBUG
+
 // Code generation can be nested.  Code generation scopes form a stack
 // of active code generators.
 class CodeGeneratorScope BASE_EMBEDDED {
@@ -390,21 +416,6 @@ class ApiGetterEntryStub : public CodeStub {
 };
 
-// Mark the debugger statement to be recognized by debugger (by the MajorKey)
-class DebuggerStatementStub : public CodeStub {
- public:
-  DebuggerStatementStub() { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Major MajorKey() { return DebuggerStatement; }
-
-  int MinorKey() { return 0; }
-
-  const char* GetName() { return "DebuggerStatementStub"; }
-};
-
 class JSEntryStub : public CodeStub {
  public:
   JSEntryStub() { }

145
deps/v8/src/compiler.cc

@@ -38,20 +38,17 @@
 #include "rewriter.h"
 #include "scopes.h"
 #include "usage-analyzer.h"
+#include "liveedit.h"
 
 namespace v8 {
 namespace internal {
 
-static Handle<Code> MakeCode(FunctionLiteral* literal,
-                             Handle<Script> script,
-                             Handle<Context> context,
-                             bool is_eval,
-                             CompilationInfo* info) {
-  ASSERT(literal != NULL);
+static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
+  FunctionLiteral* function = info->function();
+  ASSERT(function != NULL);
   // Rewrite the AST by introducing .result assignments where needed.
-  if (!Rewriter::Process(literal) || !AnalyzeVariableUsage(literal)) {
+  if (!Rewriter::Process(function) || !AnalyzeVariableUsage(function)) {
     // Signal a stack overflow by returning a null handle. The stack
     // overflow exception will be thrown by the caller.
     return Handle<Code>::null();
@@ -62,7 +59,7 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
     // the top scope only contains the single lazily compiled function,
     // so this doesn't re-allocate variables repeatedly.
     HistogramTimerScope timer(&Counters::variable_allocation);
-    Scope* top = literal->scope();
+    Scope* top = info->scope();
     while (top->outer_scope() != NULL) top = top->outer_scope();
     top->AllocateVariables(context);
   }
@@ -71,12 +68,12 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
   if (Bootstrapper::IsActive() ?
       FLAG_print_builtin_scopes :
       FLAG_print_scopes) {
-    literal->scope()->Print();
+    info->scope()->Print();
   }
 #endif
 
   // Optimize the AST.
-  if (!Rewriter::Optimize(literal)) {
+  if (!Rewriter::Optimize(function)) {
     // Signal a stack overflow by returning a null handle. The stack
     // overflow exception will be thrown by the caller.
     return Handle<Code>::null();
@@ -98,25 +95,25 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
   Handle<SharedFunctionInfo> shared = info->shared_info();
   bool is_run_once = (shared.is_null())
-      ? literal->scope()->is_global_scope()
+      ? info->scope()->is_global_scope()
      : (shared->is_toplevel() || shared->try_full_codegen());
 
   if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
     FullCodeGenSyntaxChecker checker;
-    checker.Check(literal);
+    checker.Check(function);
     if (checker.has_supported_syntax()) {
-      return FullCodeGenerator::MakeCode(literal, script, is_eval);
+      return FullCodeGenerator::MakeCode(info);
     }
   } else if (FLAG_always_fast_compiler ||
             (FLAG_fast_compiler && !is_run_once)) {
     FastCodeGenSyntaxChecker checker;
-    checker.Check(literal, info);
+    checker.Check(info);
     if (checker.has_supported_syntax()) {
-      return FastCodeGenerator::MakeCode(literal, script, is_eval, info);
+      return FastCodeGenerator::MakeCode(info);
     }
   }
 
-  return CodeGenerator::MakeCode(literal, script, is_eval, info);
+  return CodeGenerator::MakeCode(info);
 }
@@ -180,10 +177,8 @@ static Handle<JSFunction> MakeFunction(bool is_global,
   HistogramTimerScope timer(rate);
 
   // Compile the code.
-  CompilationInfo info(Handle<SharedFunctionInfo>::null(),
-                       Handle<Object>::null(),  // No receiver.
-                       0);  // Not nested in a loop.
-  Handle<Code> code = MakeCode(lit, script, context, is_eval, &info);
+  CompilationInfo info(lit, script, is_eval);
+  Handle<Code> code = MakeCode(context, &info);
 
   // Check for stack-overflow exceptions.
   if (code.is_null()) {
@@ -243,7 +238,8 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
                                      Handle<Object> script_name,
                                      int line_offset, int column_offset,
                                      v8::Extension* extension,
-                                     ScriptDataImpl* input_pre_data) {
+                                     ScriptDataImpl* input_pre_data,
+                                     Handle<Object> script_data) {
   int source_length = source->length();
   Counters::total_load_size.Increment(source_length);
   Counters::total_compile_size.Increment(source_length);
@@ -277,6 +273,9 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
       script->set_column_offset(Smi::FromInt(column_offset));
     }
 
+    script->set_data(script_data.is_null() ? Heap::undefined_value()
+                                           : *script_data);
+
     // Compile the function and add it to the cache.
     result = MakeFunction(true,
                           false,
@@ -355,7 +354,6 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
   // Compute name, source code and script data.
   Handle<SharedFunctionInfo> shared = info->shared_info();
   Handle<String> name(String::cast(shared->name()));
-  Handle<Script> script(Script::cast(shared->script()));
 
   int start_position = shared->start_position();
   int end_position = shared->end_position();
@@ -364,7 +362,8 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
 
   // Generate the AST for the lazily compiled function. The AST may be
   // NULL in case of parser stack overflow.
-  FunctionLiteral* lit = MakeLazyAST(script, name,
+  FunctionLiteral* lit = MakeLazyAST(info->script(),
+                                     name,
                                      start_position,
                                      end_position,
                                      is_expression);
@@ -374,6 +373,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
     ASSERT(Top::has_pending_exception());
     return false;
   }
+  info->set_function(lit);
 
   // Measure how long it takes to do the lazy compilation; only take
   // the rest of the function into account to avoid overlap with the
@@ -381,11 +381,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
   HistogramTimerScope timer(&Counters::compile_lazy);
 
   // Compile the code.
-  Handle<Code> code = MakeCode(lit,
-                               script,
-                               Handle<Context>::null(),
-                               false,
-                               info);
+  Handle<Code> code = MakeCode(Handle<Context>::null(), info);
 
   // Check for stack-overflow exception.
   if (code.is_null()) {
@@ -394,28 +390,12 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
   }
 
 #if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-  // Log the code generation. If source information is available include script
-  // name and line number. Check explicit whether logging is enabled as finding
-  // the line number is not for free.
-  if (Logger::is_logging() || OProfileAgent::is_enabled()) {
-    Handle<String> func_name(name->length() > 0 ?
-                             *name : shared->inferred_name());
-    if (script->name()->IsString()) {
-      int line_num = GetScriptLineNumber(script, start_position) + 1;
-      LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name,
-                          String::cast(script->name()), line_num));
-      OProfileAgent::CreateNativeCodeRegion(*func_name,
-                                            String::cast(script->name()),
-                                            line_num,
-                                            code->instruction_start(),
-                                            code->instruction_size());
-    } else {
-      LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name));
-      OProfileAgent::CreateNativeCodeRegion(*func_name,
-                                            code->instruction_start(),
-                                            code->instruction_size());
-    }
-  }
+  LogCodeCreateEvent(Logger::LAZY_COMPILE_TAG,
+                     name,
+                     Handle<String>(shared->inferred_name()),
+                     start_position,
+                     info->script(),
+                     code);
 #endif
 
   // Update the shared function info with the compiled code.
@@ -450,7 +430,8 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
     // compiled. These builtins cannot be handled lazily by the parser,
     // since we have to know if a function uses the special natives
     // syntax, which is something the parser records.
-    bool allow_lazy = literal->AllowsLazyCompilation();
+    bool allow_lazy = literal->AllowsLazyCompilation() &&
+        !LiveEditFunctionTracker::IsActive();
 
     // Generate code
     Handle<Code> code;
@@ -466,9 +447,7 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
       // Generate code and return it.  The way that the compilation mode
      // is controlled by the command-line flags is described in
      // the static helper function MakeCode.
-      CompilationInfo info(Handle<SharedFunctionInfo>::null(),
-                           Handle<Object>::null(),  // No receiver.
-                           0);  // Not nested in a loop.
+      CompilationInfo info(literal, script, false);
 
       CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
       bool is_run_once = literal->try_full_codegen();
@@ -477,9 +456,7 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
         FullCodeGenSyntaxChecker checker;
         checker.Check(literal);
         if (checker.has_supported_syntax()) {
-          code = FullCodeGenerator::MakeCode(literal,
-                                             script,
-                                             false);  // Not eval.
+          code = FullCodeGenerator::MakeCode(&info);
           is_compiled = true;
         }
       } else if (FLAG_always_fast_compiler ||
@@ -487,19 +464,16 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
         // Since we are not lazily compiling we do not have a receiver to
         // specialize for.
         FastCodeGenSyntaxChecker checker;
-        checker.Check(literal, &info);
+        checker.Check(&info);
         if (checker.has_supported_syntax()) {
-          code = FastCodeGenerator::MakeCode(literal, script, false, &info);
+          code = FastCodeGenerator::MakeCode(&info);
           is_compiled = true;
         }
       }
 
       if (!is_compiled) {
         // We fall back to the classic V8 code generator.
-        code = CodeGenerator::MakeCode(literal,
-                                       script,
-                                       false,  // Not eval.
-                                       &info);
+        code = CodeGenerator::MakeCode(&info);
       }
 
       // Check for stack-overflow exception.
@@ -509,12 +483,14 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
       }
 
       // Function compilation complete.
-      LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *literal->name()));
 
-#ifdef ENABLE_OPROFILE_AGENT
-      OProfileAgent::CreateNativeCodeRegion(*literal->name(),
-                                            code->instruction_start(),
-                                            code->instruction_size());
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
+      LogCodeCreateEvent(Logger::FUNCTION_TAG,
+                         literal->name(),
+                         literal->inferred_name(),
+                         literal->start_position(),
+                         script,
+                         code);
 #endif
     }
@@ -562,4 +538,35 @@ void Compiler::SetFunctionInfo(Handle<JSFunction> fun,
 }
 
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
+void Compiler::LogCodeCreateEvent(Logger::LogEventsAndTags tag,
+                                  Handle<String> name,
+                                  Handle<String> inferred_name,
+                                  int start_position,
+                                  Handle<Script> script,
+                                  Handle<Code> code) {
+  // Log the code generation. If source information is available
+  // include script name and line number. Check explicitly whether
+  // logging is enabled as finding the line number is not free.
+  if (Logger::is_logging() || OProfileAgent::is_enabled()) {
+    Handle<String> func_name(name->length() > 0 ? *name : *inferred_name);
+    if (script->name()->IsString()) {
+      int line_num = GetScriptLineNumber(script, start_position) + 1;
+      LOG(CodeCreateEvent(tag, *code, *func_name,
+                          String::cast(script->name()), line_num));
+      OProfileAgent::CreateNativeCodeRegion(*func_name,
+                                            String::cast(script->name()),
+                                            line_num,
+                                            code->instruction_start(),
+                                            code->instruction_size());
+    } else {
+      LOG(CodeCreateEvent(tag, *code, *func_name));
+      OProfileAgent::CreateNativeCodeRegion(*func_name,
+                                            code->instruction_start(),
+                                            code->instruction_size());
    }
+  }
+}
+#endif
+
 } }  // namespace v8::internal
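
Compiler::Compile gains a trailing script_data handle that is stored on the script object (see set_data above). An illustrative call site with placeholder values, mirroring the updated caller in debug.cc further down:

    Handle<JSFunction> boilerplate =
        Compiler::Compile(source,                   // script source text
                          script_name,              // resource name
                          0, 0,                     // line/column offsets
                          NULL,                     // no v8::Extension
                          NULL,                     // no preparse data
                          Handle<Object>::null());  // no embedder script data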

129
deps/v8/src/compiler.h

@@ -28,45 +28,136 @@
 #ifndef V8_COMPILER_H_
 #define V8_COMPILER_H_
 
+#include "ast.h"
 #include "frame-element.h"
 #include "parser.h"
+#include "register-allocator.h"
 #include "zone.h"
 
 namespace v8 {
 namespace internal {
 
-// CompilationInfo encapsulates some information known at compile time.
+// CompilationInfo encapsulates some information known at compile time.  It
+// is constructed based on the resources available at compile-time.
 class CompilationInfo BASE_EMBEDDED {
  public:
-  CompilationInfo(Handle<SharedFunctionInfo> shared_info,
-                  Handle<Object> receiver,
-                  int loop_nesting)
-      : shared_info_(shared_info),
-        receiver_(receiver),
+  // Lazy compilation of a JSFunction.
+  CompilationInfo(Handle<JSFunction> closure,
+                  int loop_nesting,
+                  Handle<Object> receiver)
+      : closure_(closure),
+        function_(NULL),
+        is_eval_(false),
         loop_nesting_(loop_nesting),
-        has_this_properties_(false),
-        has_globals_(false) {
+        receiver_(receiver) {
+    Initialize();
+    ASSERT(!closure_.is_null() &&
+           shared_info_.is_null() &&
+           script_.is_null());
   }
 
-  Handle<SharedFunctionInfo> shared_info() { return shared_info_; }
+  // Lazy compilation based on SharedFunctionInfo.
+  explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info)
+      : shared_info_(shared_info),
+        function_(NULL),
+        is_eval_(false),
+        loop_nesting_(0) {
+    Initialize();
+    ASSERT(closure_.is_null() &&
+           !shared_info_.is_null() &&
+           script_.is_null());
+  }
 
-  bool has_receiver() { return !receiver_.is_null(); }
-  Handle<Object> receiver() { return receiver_; }
+  // Eager compilation.
+  CompilationInfo(FunctionLiteral* literal, Handle<Script> script, bool is_eval)
+      : script_(script),
+        function_(literal),
+        is_eval_(is_eval),
+        loop_nesting_(0) {
+    Initialize();
+    ASSERT(closure_.is_null() &&
+           shared_info_.is_null() &&
+           !script_.is_null());
+  }
+
+  // We can only get a JSFunction if we actually have one.
+  Handle<JSFunction> closure() { return closure_; }
+
+  // We can get a SharedFunctionInfo from a JSFunction or if we actually
+  // have one.
+  Handle<SharedFunctionInfo> shared_info() {
+    if (!closure().is_null()) {
+      return Handle<SharedFunctionInfo>(closure()->shared());
+    } else {
+      return shared_info_;
+    }
+  }
+
+  // We can always get a script.  Either we have one or we can get a shared
+  // function info.
+  Handle<Script> script() {
+    if (!script_.is_null()) {
+      return script_;
+    } else {
+      ASSERT(shared_info()->script()->IsScript());
+      return Handle<Script>(Script::cast(shared_info()->script()));
+    }
+  }
+
+  // There should always be a function literal, but it may be set after
+  // construction (for lazy compilation).
+  FunctionLiteral* function() { return function_; }
+  void set_function(FunctionLiteral* literal) {
+    ASSERT(function_ == NULL);
+    function_ = literal;
+  }
+
+  // Simple accessors.
+  bool is_eval() { return is_eval_; }
   int loop_nesting() { return loop_nesting_; }
+  bool has_receiver() { return !receiver_.is_null(); }
+  Handle<Object> receiver() { return receiver_; }
 
+  // Accessors for mutable fields, possibly set by analysis passes with
+  // default values given by Initialize.
   bool has_this_properties() { return has_this_properties_; }
   void set_has_this_properties(bool flag) { has_this_properties_ = flag; }
 
+  bool has_global_object() {
+    return !closure().is_null() && (closure()->context()->global() != NULL);
+  }
+
+  GlobalObject* global_object() {
+    return has_global_object() ? closure()->context()->global() : NULL;
+  }
+
   bool has_globals() { return has_globals_; }
   void set_has_globals(bool flag) { has_globals_ = flag; }
 
+  // Derived accessors.
+  Scope* scope() { return function()->scope(); }
+
  private:
+  void Initialize() {
+    has_this_properties_ = false;
+    has_globals_ = false;
+  }
+
+  Handle<JSFunction> closure_;
   Handle<SharedFunctionInfo> shared_info_;
-  Handle<Object> receiver_;
+  Handle<Script> script_;
+
+  FunctionLiteral* function_;
+
+  bool is_eval_;
   int loop_nesting_;
+
+  Handle<Object> receiver_;
+
   bool has_this_properties_;
   bool has_globals_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
 };
@@ -94,7 +185,8 @@ class Compiler : public AllStatic {
                                     Handle<Object> script_name,
                                     int line_offset, int column_offset,
                                     v8::Extension* extension,
-                                    ScriptDataImpl* script_Data);
+                                    ScriptDataImpl* pre_data,
+                                    Handle<Object> script_data);
 
   // Compile a String source within a context for Eval.
   static Handle<JSFunction> CompileEval(Handle<String> source,
@@ -119,6 +211,17 @@ class Compiler : public AllStatic {
                               FunctionLiteral* lit,
                               bool is_toplevel,
                               Handle<Script> script);
+
+ private:
+#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
+  static void LogCodeCreateEvent(Logger::LogEventsAndTags tag,
+                                 Handle<String> name,
+                                 Handle<String> inferred_name,
+                                 int start_position,
+                                 Handle<Script> script,
+                                 Handle<Code> code);
+#endif
 };
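
CompilationInfo is now constructed in one of three modes, and the ASSERTs in each constructor enforce that exactly one of closure, shared info, or script is supplied. A sketch of the three entry points as used elsewhere in this commit (all handles are placeholders):

    CompilationInfo eager(literal, script, false);  // eager: parsed FunctionLiteral
    CompilationInfo lazy(shared_info);              // lazy: from SharedFunctionInfo
    CompilationInfo lazy2(closure, 0, receiver);    // lazy: from a live JSFunction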

1
deps/v8/src/contexts.h

@@ -76,7 +76,6 @@ enum ContextLookupFlags {
   V(TO_INTEGER_FUN_INDEX, JSFunction, to_integer_fun) \
   V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun) \
   V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \
-  V(TO_BOOLEAN_FUN_INDEX, JSFunction, to_boolean_fun) \
   V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
   V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
   V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \

4
deps/v8/src/d8-readline.cc

@@ -27,8 +27,8 @@
 
 #include <cstdio>  // NOLINT
-#include <readline/readline.h>
-#include <readline/history.h>
+#include <readline/readline.h>  // NOLINT
+#include <readline/history.h>  // NOLINT
 
 #include "d8.h"

318
deps/v8/src/data-flow.cc

@@ -33,8 +33,9 @@ namespace v8 {
 namespace internal {
 
-void AstLabeler::Label(FunctionLiteral* fun) {
-  VisitStatements(fun->body());
+void AstLabeler::Label(CompilationInfo* info) {
+  info_ = info;
+  VisitStatements(info_->function()->body());
 }
@@ -162,6 +163,10 @@ void AstLabeler::VisitSlot(Slot* expr) {
 
 void AstLabeler::VisitVariableProxy(VariableProxy* expr) {
   expr->set_num(next_number_++);
+  Variable* var = expr->var();
+  if (var->is_global() && !var->is_this()) {
+    info_->set_has_globals(true);
+  }
 }
@@ -194,15 +199,11 @@ void AstLabeler::VisitCatchExtensionObject(
 void AstLabeler::VisitAssignment(Assignment* expr) {
   Property* prop = expr->target()->AsProperty();
   ASSERT(prop != NULL);
-  if (prop != NULL) {
-    ASSERT(prop->key()->IsPropertyName());
-    VariableProxy* proxy = prop->obj()->AsVariableProxy();
-    if (proxy != NULL && proxy->var()->is_this()) {
-      has_this_properties_ = true;
-    } else {
-      Visit(prop->obj());
-    }
-  }
+  ASSERT(prop->key()->IsPropertyName());
+  VariableProxy* proxy = prop->obj()->AsVariableProxy();
+  USE(proxy);
+  ASSERT(proxy != NULL && proxy->var()->is_this());
+  info()->set_has_this_properties(true);
   Visit(expr->value());
   expr->set_num(next_number_++);
 }
@@ -214,7 +215,12 @@ void AstLabeler::VisitThrow(Throw* expr) {
 
 void AstLabeler::VisitProperty(Property* expr) {
-  UNREACHABLE();
+  ASSERT(expr->key()->IsPropertyName());
+  VariableProxy* proxy = expr->obj()->AsVariableProxy();
+  USE(proxy);
+  ASSERT(proxy != NULL && proxy->var()->is_this());
+  info()->set_has_this_properties(true);
+  expr->set_num(next_number_++);
 }
@@ -264,4 +270,292 @@ void AstLabeler::VisitDeclaration(Declaration* decl) {
   UNREACHABLE();
 }
 
+ZoneList<Expression*>* VarUseMap::Lookup(Variable* var) {
+  HashMap::Entry* entry = HashMap::Lookup(var, var->name()->Hash(), true);
+  if (entry->value == NULL) {
+    entry->value = new ZoneList<Expression*>(1);
+  }
+  return reinterpret_cast<ZoneList<Expression*>*>(entry->value);
+}
+
+void LivenessAnalyzer::Analyze(FunctionLiteral* fun) {
+  // Process the function body.
+  VisitStatements(fun->body());
+
+  // All variables are implicitly defined at the function start.
+  // Record a definition of all variables live at function entry.
+  for (HashMap::Entry* p = live_vars_.Start();
+       p != NULL;
+       p = live_vars_.Next(p)) {
+    Variable* var = reinterpret_cast<Variable*>(p->key);
+    RecordDef(var, fun);
+  }
+}
+
+void LivenessAnalyzer::VisitStatements(ZoneList<Statement*>* stmts) {
+  // Visit statements right-to-left.
+  for (int i = stmts->length() - 1; i >= 0; i--) {
+    Visit(stmts->at(i));
+  }
+}
+
+void LivenessAnalyzer::RecordUse(Variable* var, Expression* expr) {
+  ASSERT(var->is_global() || var->is_this());
+  ZoneList<Expression*>* uses = live_vars_.Lookup(var);
+  uses->Add(expr);
+}
+
+void LivenessAnalyzer::RecordDef(Variable* var, Expression* expr) {
+  ASSERT(var->is_global() || var->is_this());
+
+  // We do not support other expressions that can define variables.
+  ASSERT(expr->AsFunctionLiteral() != NULL);
+
+  // Add the variable to the list of defined variables.
+  if (expr->defined_vars() == NULL) {
+    expr->set_defined_vars(new ZoneList<DefinitionInfo*>(1));
+  }
+  DefinitionInfo* def = new DefinitionInfo();
+  expr->AsFunctionLiteral()->defined_vars()->Add(def);
+
+  // Compute the last use of the definition. The variable uses are
+  // inserted in reversed evaluation order. The first element
+  // in the list of live uses is the last use.
+  ZoneList<Expression*>* uses = live_vars_.Lookup(var);
+  while (uses->length() > 0) {
+    Expression* use_site = uses->RemoveLast();
+    use_site->set_var_def(def);
+    if (uses->length() == 0) {
+      def->set_last_use(use_site);
+    }
+  }
+}
+
+// Visitor functions for live variable analysis.
+void LivenessAnalyzer::VisitDeclaration(Declaration* decl) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitBlock(Block* stmt) {
+  VisitStatements(stmt->statements());
+}
+
+void LivenessAnalyzer::VisitExpressionStatement(
+    ExpressionStatement* stmt) {
+  Visit(stmt->expression());
+}
+
+void LivenessAnalyzer::VisitEmptyStatement(EmptyStatement* stmt) {
+  // Do nothing.
+}
+
+void LivenessAnalyzer::VisitIfStatement(IfStatement* stmt) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitContinueStatement(ContinueStatement* stmt) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitBreakStatement(BreakStatement* stmt) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitReturnStatement(ReturnStatement* stmt) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitWithEnterStatement(
+    WithEnterStatement* stmt) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitWithExitStatement(WithExitStatement* stmt) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitSwitchStatement(SwitchStatement* stmt) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitForStatement(ForStatement* stmt) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitForInStatement(ForInStatement* stmt) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitTryFinallyStatement(
+    TryFinallyStatement* stmt) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitDebuggerStatement(
+    DebuggerStatement* stmt) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitFunctionLiteral(FunctionLiteral* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitConditional(Conditional* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitSlot(Slot* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitVariableProxy(VariableProxy* expr) {
+  Variable* var = expr->var();
+  ASSERT(var->is_global());
+  ASSERT(!var->is_this());
+  RecordUse(var, expr);
+}
+
+void LivenessAnalyzer::VisitLiteral(Literal* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitRegExpLiteral(RegExpLiteral* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitObjectLiteral(ObjectLiteral* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitArrayLiteral(ArrayLiteral* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitCatchExtensionObject(
+    CatchExtensionObject* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitAssignment(Assignment* expr) {
+  Property* prop = expr->target()->AsProperty();
+  ASSERT(prop != NULL);
+  ASSERT(prop->key()->IsPropertyName());
+  VariableProxy* proxy = prop->obj()->AsVariableProxy();
+  ASSERT(proxy != NULL && proxy->var()->is_this());
+
+  // Record use of this at the assignment node. Assignments to
+  // this-properties are treated like unary operations.
+  RecordUse(proxy->var(), expr);
+
+  // Visit right-hand side.
+  Visit(expr->value());
+}
+
+void LivenessAnalyzer::VisitThrow(Throw* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitProperty(Property* expr) {
+  ASSERT(expr->key()->IsPropertyName());
+  VariableProxy* proxy = expr->obj()->AsVariableProxy();
+  ASSERT(proxy != NULL && proxy->var()->is_this());
+  RecordUse(proxy->var(), expr);
+}
+
+void LivenessAnalyzer::VisitCall(Call* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitCallNew(CallNew* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitCallRuntime(CallRuntime* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitCountOperation(CountOperation* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitBinaryOperation(BinaryOperation* expr) {
+  // Visit child nodes in reverse evaluation order.
+  Visit(expr->right());
+  Visit(expr->left());
+}
+
+void LivenessAnalyzer::VisitCompareOperation(CompareOperation* expr) {
+  UNREACHABLE();
+}
+
+void LivenessAnalyzer::VisitThisFunction(ThisFunction* expr) {
+  UNREACHABLE();
+}
+
 } }  // namespace v8::internal
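
LivenessAnalyzer visits statements and subexpressions in reverse evaluation order, so the first use it records for a variable is that variable's last use; RecordDef then drains the collected uses and stamps the definition's last_use. A standalone illustration of the reverse-walk trick on a flat list of uses:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
      // Variable uses in evaluation order, e.g. from "this.x | this.y | g".
      std::vector<std::string> uses;
      uses.push_back("this"); uses.push_back("this"); uses.push_back("g");

      std::map<std::string, int> last_use;
      // Walk in reverse: the first time we meet a variable is its last use.
      for (int i = static_cast<int>(uses.size()) - 1; i >= 0; i--) {
        if (last_use.count(uses[i]) == 0) last_use[uses[i]] = i;
      }

      std::map<std::string, int>::const_iterator it;
      for (it = last_use.begin(); it != last_use.end(); ++it) {
        std::printf("last use of %s at position %d\n",
                    it->first.c_str(), it->second);
      }
      return 0;
    }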

62
deps/v8/src/data-flow.h

@@ -29,7 +29,7 @@
 #define V8_DATAFLOW_H_
 
 #include "ast.h"
-#include "scopes.h"
+#include "compiler.h"
 
 namespace v8 {
 namespace internal {
@@ -38,13 +38,13 @@ namespace internal {
 // their evaluation order (post-order left-to-right traversal).
 class AstLabeler: public AstVisitor {
  public:
-  AstLabeler() : next_number_(0), has_this_properties_(false) {}
+  AstLabeler() : next_number_(0) {}
 
-  void Label(FunctionLiteral* fun);
-
-  bool has_this_properties() { return has_this_properties_; }
+  void Label(CompilationInfo* info);
 
  private:
+  CompilationInfo* info() { return info_; }
+
   void VisitDeclarations(ZoneList<Declaration*>* decls);
   void VisitStatements(ZoneList<Statement*>* stmts);
@@ -56,12 +56,62 @@ class AstLabeler: public AstVisitor {
   // Traversal number for labelling AST nodes.
   int next_number_;
 
-  bool has_this_properties_;
+  CompilationInfo* info_;
 
   DISALLOW_COPY_AND_ASSIGN(AstLabeler);
 };
 
+class VarUseMap : public HashMap {
+ public:
+  VarUseMap() : HashMap(VarMatch) {}
+
+  ZoneList<Expression*>* Lookup(Variable* var);
+
+ private:
+  static bool VarMatch(void* key1, void* key2) { return key1 == key2; }
+};
+
+class DefinitionInfo : public ZoneObject {
+ public:
+  explicit DefinitionInfo() : last_use_(NULL) {}
+
+  Expression* last_use() { return last_use_; }
+  void set_last_use(Expression* expr) { last_use_ = expr; }
+
+ private:
+  Expression* last_use_;
+  Register location_;
+};
+
+class LivenessAnalyzer : public AstVisitor {
+ public:
+  LivenessAnalyzer() {}
+
+  void Analyze(FunctionLiteral* fun);
+
+ private:
+  void VisitStatements(ZoneList<Statement*>* stmts);
+
+  void RecordUse(Variable* var, Expression* expr);
+  void RecordDef(Variable* var, Expression* expr);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  // Map for tracking the live variables.
+  VarUseMap live_vars_;
+
+  DISALLOW_COPY_AND_ASSIGN(LivenessAnalyzer);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_DATAFLOW_H_

8
deps/v8/src/debug-delay.js

@@ -1934,10 +1934,14 @@ DebugCommandProcessor.prototype.profileRequest_ = function(request, response) {
   if (isNaN(modules)) {
     return response.failed('Modules is not an integer');
   }
+  var tag = parseInt(request.arguments.tag);
+  if (isNaN(tag)) {
+    tag = 0;
+  }
   if (request.arguments.command == 'resume') {
-    %ProfilerResume(modules);
+    %ProfilerResume(modules, tag);
   } else if (request.arguments.command == 'pause') {
-    %ProfilerPause(modules);
+    %ProfilerPause(modules, tag);
   } else {
     return response.failed('Unknown command');
   }

14
deps/v8/src/debug.cc

@@ -31,6 +31,7 @@
 #include "arguments.h"
 #include "bootstrapper.h"
 #include "code-stubs.h"
+#include "codegen.h"
 #include "compilation-cache.h"
 #include "compiler.h"
 #include "debug.h"
@@ -453,15 +454,7 @@ void BreakLocationIterator::ClearDebugBreakAtIC() {
 
 bool BreakLocationIterator::IsDebuggerStatement() {
-  if (RelocInfo::IsCodeTarget(rmode())) {
-    Address target = original_rinfo()->target_address();
-    Code* code = Code::GetCodeFromTargetAddress(target);
-    if (code->kind() == Code::STUB) {
-      CodeStub::Major major_key = code->major_key();
-      return (major_key == CodeStub::DebuggerStatement);
-    }
-  }
-  return false;
+  return RelocInfo::DEBUG_BREAK == rmode();
 }
@@ -690,7 +683,8 @@ bool Debug::CompileDebuggerScript(int index) {
   bool allow_natives_syntax = FLAG_allow_natives_syntax;
   FLAG_allow_natives_syntax = true;
   Handle<JSFunction> boilerplate;
-  boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL);
+  boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL,
+                                  Handle<String>::null());
   FLAG_allow_natives_syntax = allow_natives_syntax;
 
   // Silently ignore stack overflows during compilation.
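
A debugger statement is now identified purely by the relocation mode recorded at the break location, instead of decoding the call target and matching the removed DebuggerStatementStub's major key. The whole check reduces to a one-liner; a sketch:

    bool IsDebuggerStatement(RelocInfo::Mode rmode) {
      return rmode == RelocInfo::DEBUG_BREAK;  // no code-object inspection
    }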

2
deps/v8/src/disassembler.cc

@@ -261,7 +261,7 @@ static int DecodeIt(FILE* f,
           ASSERT(code->major_key() == CodeStub::MajorKeyFromKey(key));
           out.AddFormatted(" %s, %s, ",
                            Code::Kind2String(kind),
-                           CodeStub::MajorName(code->major_key()));
+                           CodeStub::MajorName(code->major_key(), false));
           switch (code->major_key()) {
             case CodeStub::CallFunction:
               out.AddFormatted("argc = %d", minor_key);

2
deps/v8/src/execution.cc

@@ -91,7 +91,7 @@ static Handle<Object> Invoke(bool construct,
   JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
 
   // Call the function through the right JS entry stub.
-  byte* entry_address= func->code()->entry();
+  byte* entry_address = func->code()->entry();
   JSFunction* function = *func;
   Object* receiver_pointer = *receiver;
   value = CALL_GENERATED_CODE(entry, entry_address, function,

225
deps/v8/src/fast-codegen.cc

@@ -51,8 +51,7 @@ namespace internal {
   } while (false)
 
-void FastCodeGenSyntaxChecker::Check(FunctionLiteral* fun,
-                                     CompilationInfo* info) {
+void FastCodeGenSyntaxChecker::Check(CompilationInfo* info) {
   info_ = info;
 
   // We do not specialize if we do not have a receiver or if it is not a
@@ -64,7 +63,7 @@ void FastCodeGenSyntaxChecker::Check(FunctionLiteral* fun,
 
   // We do not support stack or heap slots (both of which require
   // allocation).
-  Scope* scope = fun->scope();
+  Scope* scope = info->scope();
   if (scope->num_stack_slots() > 0) {
     BAILOUT("Function has stack-allocated locals");
   }
@@ -76,8 +75,10 @@ void FastCodeGenSyntaxChecker::Check(FunctionLiteral* fun,
   CHECK_BAILOUT;
 
   // We do not support empty function bodies.
-  if (fun->body()->is_empty()) BAILOUT("Function has an empty body");
-  VisitStatements(fun->body());
+  if (info->function()->body()->is_empty()) {
+    BAILOUT("Function has an empty body");
+  }
+  VisitStatements(info->function()->body());
 }
@@ -88,10 +89,10 @@ void FastCodeGenSyntaxChecker::VisitDeclarations(
 
 void FastCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
-  for (int i = 0, len = stmts->length(); i < len; i++) {
-    Visit(stmts->at(i));
-    CHECK_BAILOUT;
+  if (stmts->length() != 1) {
+    BAILOUT("Function body is not a singleton statement.");
   }
+  Visit(stmts->at(0));
 }
@@ -213,7 +214,24 @@ void FastCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
 
 void FastCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
   // Only global variable references are supported.
   Variable* var = expr->var();
-  if (!var->is_global()) BAILOUT("Non-global variable");
+  if (!var->is_global() || var->is_this()) BAILOUT("Non-global variable");
+
+  // Check if the global variable is existing and non-deletable.
+  if (info()->has_global_object()) {
+    LookupResult lookup;
+    info()->global_object()->Lookup(*expr->name(), &lookup);
+    if (!lookup.IsProperty()) {
+      BAILOUT("Non-existing global variable");
+    }
+    // We do not handle global variables with accessors or interceptors.
+    if (lookup.type() != NORMAL) {
+      BAILOUT("Global variable with accessors or interceptors.");
+    }
+    // We do not handle deletable global variables.
+    if (!lookup.IsDontDelete()) {
+      BAILOUT("Deletable global variable");
+    }
+  }
 }
@@ -266,6 +284,9 @@ void FastCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
     Handle<String> name = Handle<String>::cast(key->handle());
     LookupResult lookup;
     receiver->Lookup(*name, &lookup);
+    if (!lookup.IsProperty()) {
+      BAILOUT("Assigned property not found at compile time");
+    }
     if (lookup.holder() != *receiver) BAILOUT("Non-own property assignment");
     if (!lookup.type() == FIELD) BAILOUT("Non-field property assignment");
   } else {
@@ -283,7 +304,33 @@ void FastCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
 
 void FastCodeGenSyntaxChecker::VisitProperty(Property* expr) {
-  BAILOUT("Property");
+  // We support named this property references.
+  VariableProxy* proxy = expr->obj()->AsVariableProxy();
+  if (proxy == NULL || !proxy->var()->is_this()) {
+    BAILOUT("Non-this-property reference");
+  }
+  if (!expr->key()->IsPropertyName()) {
+    BAILOUT("Non-named-property reference");
+  }
+
+  // We will only specialize for fields on the object itself.
+  // Expression::IsPropertyName implies that the name is a literal
+  // symbol but we do not assume that.
+  Literal* key = expr->key()->AsLiteral();
+  if (key != NULL && key->handle()->IsString()) {
+    Handle<Object> receiver = info()->receiver();
+    Handle<String> name = Handle<String>::cast(key->handle());
+    LookupResult lookup;
+    receiver->Lookup(*name, &lookup);
+    if (!lookup.IsProperty()) {
+      BAILOUT("Referenced property not found at compile time");
+    }
+    if (lookup.holder() != *receiver) BAILOUT("Non-own property reference");
+    if (!lookup.type() == FIELD) BAILOUT("Non-field property reference");
+  } else {
+    UNREACHABLE();
+    BAILOUT("Unexpected non-string-literal property key");
+  }
 }
@@ -313,7 +360,58 @@ void FastCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
 
 void FastCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
-  BAILOUT("BinaryOperation");
+  // We support bitwise OR.
+  switch (expr->op()) {
+    case Token::COMMA:
+      BAILOUT("BinaryOperation COMMA");
+    case Token::OR:
+      BAILOUT("BinaryOperation OR");
+    case Token::AND:
+      BAILOUT("BinaryOperation AND");
+
+    case Token::BIT_OR:
+      // We support expressions nested on the left because they only require
+      // a pair of registers to keep all intermediate values in registers
+      // (i.e., the expression stack has height no more than two).
+      if (!expr->right()->IsLeaf()) BAILOUT("expression nested on right");
+
+      // We do not allow subexpressions with side effects because we
+      // (currently) bail out to the beginning of the full function.  The
+      // only expressions with side effects that we would otherwise handle
+      // are assignments.
+      if (expr->left()->AsAssignment() != NULL ||
+          expr->right()->AsAssignment() != NULL) {
+        BAILOUT("subexpression of binary operation has side effects");
+      }
+
+      Visit(expr->left());
+      CHECK_BAILOUT;
+      Visit(expr->right());
+      break;
+
+    case Token::BIT_XOR:
+      BAILOUT("BinaryOperation BIT_XOR");
+    case Token::BIT_AND:
+      BAILOUT("BinaryOperation BIT_AND");
+    case Token::SHL:
+      BAILOUT("BinaryOperation SHL");
+    case Token::SAR:
+      BAILOUT("BinaryOperation SAR");
+    case Token::SHR:
+      BAILOUT("BinaryOperation SHR");
+    case Token::ADD:
+      BAILOUT("BinaryOperation ADD");
+    case Token::SUB:
+      BAILOUT("BinaryOperation SUB");
+    case Token::MUL:
+      BAILOUT("BinaryOperation MUL");
+    case Token::DIV:
+      BAILOUT("BinaryOperation DIV");
+    case Token::MOD:
+      BAILOUT("BinaryOperation MOD");
+    default:
+      UNREACHABLE();
+  }
 }
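
The only binary operator the fast compiler now accepts is a bitwise OR whose nesting is all on the left and whose operands are side-effect free, so a body like this.a | this.b | g qualifies while a | (b | c) bails out. A toy restatement of the shape rule (the Expr type here is illustrative, not V8's AST):

    struct Expr {
      bool is_leaf;
      const Expr* left;   // NULL for leaves
      const Expr* right;  // NULL for leaves
    };

    // BIT_OR chains may grow only leftward, so at most two values are live
    // at once (one accumulated result, one fresh leaf).
    static bool IsSupportedBitOrChain(const Expr* e) {
      if (e->is_leaf) return true;
      if (e->right == NULL || !e->right->is_leaf) return false;  // nested right
      return IsSupportedBitOrChain(e->left);
    }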
@@ -332,24 +430,23 @@ void FastCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
 
 #define __ ACCESS_MASM(masm())
 
-Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
-                                         Handle<Script> script,
-                                         bool is_eval,
-                                         CompilationInfo* info) {
+Handle<Code> FastCodeGenerator::MakeCode(CompilationInfo* info) {
   // Label the AST before calling MakeCodePrologue, so AST node numbers are
   // printed with the AST.
   AstLabeler labeler;
-  labeler.Label(fun);
-  info->set_has_this_properties(labeler.has_this_properties());
+  labeler.Label(info);
+
+  LivenessAnalyzer analyzer;
+  analyzer.Analyze(info->function());
 
-  CodeGenerator::MakeCodePrologue(fun);
+  CodeGenerator::MakeCodePrologue(info);
 
   const int kInitialBufferSize = 4 * KB;
   MacroAssembler masm(NULL, kInitialBufferSize);
 
   // Generate the fast-path code.
-  FastCodeGenerator fast_cgen(&masm, script, is_eval);
-  fast_cgen.Generate(fun, info);
+  FastCodeGenerator fast_cgen(&masm);
+  fast_cgen.Generate(info);
   if (fast_cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());
     return Handle<Code>::null();
@@ -357,16 +454,16 @@ Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
 
   // Generate the full code for the function in bailout mode, using the same
   // macro assembler.
-  CodeGenerator cgen(&masm, script, is_eval);
+  CodeGenerator cgen(&masm);
   CodeGeneratorScope scope(&cgen);
-  cgen.Generate(fun, CodeGenerator::SECONDARY, info);
+  cgen.Generate(info, CodeGenerator::SECONDARY);
   if (cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());
     return Handle<Code>::null();
   }
 
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
-  return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+  return CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
 }
@@ -483,12 +580,28 @@ void FastCodeGenerator::VisitSlot(Slot* expr) {
 
 void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   ASSERT(expr->var()->is_global() && !expr->var()->is_this());
-  Comment cmnt(masm(), ";; Global");
-  if (FLAG_print_ir) {
-    SmartPointer<char> name = expr->name()->ToCString();
-    PrintF("%d: t%d = Global(%s)\n", expr->num(), expr->num(), *name);
+  // Check if we can compile a global variable load directly from the cell.
+  ASSERT(info()->has_global_object());
+  LookupResult lookup;
+  info()->global_object()->Lookup(*expr->name(), &lookup);
+  // We only support normal (non-accessor/interceptor) DontDelete properties
+  // for now.
+  ASSERT(lookup.IsProperty());
+  ASSERT_EQ(NORMAL, lookup.type());
+  ASSERT(lookup.IsDontDelete());
+  Handle<Object> cell(info()->global_object()->GetPropertyCell(&lookup));
+
+  // Global variable lookups do not have side effects, so we do not need to
+  // emit code if we are in an effect context.
+  if (!destination().is(no_reg)) {
+    Comment cmnt(masm(), ";; Global");
+    if (FLAG_print_ir) {
+      SmartPointer<char> name = expr->name()->ToCString();
+      PrintF("%d: t%d = Global(%s)  // last_use = %d\n", expr->num(),
+             expr->num(), *name, expr->var_def()->last_use()->num());
    }
+    EmitGlobalVariableLoad(cell);
   }
-  EmitGlobalVariableLoad(expr->name());
 }
@ -518,8 +631,13 @@ void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
void FastCodeGenerator::VisitAssignment(Assignment* expr) { void FastCodeGenerator::VisitAssignment(Assignment* expr) {
// Known to be a simple this property assignment. // Known to be a simple this property assignment. Effectively a unary
Visit(expr->value()); // operation.
{ Register my_destination = destination();
set_destination(accumulator0());
Visit(expr->value());
set_destination(my_destination);
}
Property* prop = expr->target()->AsProperty(); Property* prop = expr->target()->AsProperty();
ASSERT_NOT_NULL(prop); ASSERT_NOT_NULL(prop);
@ -529,11 +647,14 @@ void FastCodeGenerator::VisitAssignment(Assignment* expr) {
Handle<String> name = Handle<String> name =
Handle<String>::cast(prop->key()->AsLiteral()->handle()); Handle<String>::cast(prop->key()->AsLiteral()->handle());
Comment cmnt(masm(), ";; Store(this)"); Comment cmnt(masm(), ";; Store to this");
if (FLAG_print_ir) { if (FLAG_print_ir) {
SmartPointer<char> name_string = name->ToCString(); SmartPointer<char> name_string = name->ToCString();
PrintF("%d: t%d = Store(this, \"%s\", t%d)\n", PrintF("%d: ", expr->num());
expr->num(), expr->num(), *name_string, expr->value()->num()); if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
PrintF("Store(this, \"%s\", t%d) // last_use(this) = %d\n", *name_string,
expr->value()->num(),
expr->var_def()->last_use()->num());
} }
EmitThisPropertyStore(name); EmitThisPropertyStore(name);
@ -546,7 +667,22 @@ void FastCodeGenerator::VisitThrow(Throw* expr) {
void FastCodeGenerator::VisitProperty(Property* expr) { void FastCodeGenerator::VisitProperty(Property* expr) {
UNREACHABLE(); ASSERT_NOT_NULL(expr->obj()->AsVariableProxy());
ASSERT(expr->obj()->AsVariableProxy()->var()->is_this());
ASSERT(expr->key()->IsPropertyName());
if (!destination().is(no_reg)) {
Handle<String> name =
Handle<String>::cast(expr->key()->AsLiteral()->handle());
Comment cmnt(masm(), ";; Load from this");
if (FLAG_print_ir) {
SmartPointer<char> name_string = name->ToCString();
PrintF("%d: t%d = Load(this, \"%s\") // last_use(this) = %d\n",
expr->num(), expr->num(), *name_string,
expr->var_def()->last_use()->num());
}
EmitThisPropertyLoad(name);
}
} }
@ -576,7 +712,26 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
UNREACHABLE(); // We support limited binary operations: bitwise OR only allowed to be
// nested on the left.
ASSERT(expr->op() == Token::BIT_OR);
ASSERT(expr->right()->IsLeaf());
{ Register my_destination = destination();
set_destination(accumulator1());
Visit(expr->left());
set_destination(accumulator0());
Visit(expr->right());
set_destination(my_destination);
}
Comment cmnt(masm(), ";; BIT_OR");
if (FLAG_print_ir) {
PrintF("%d: ", expr->num());
if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
PrintF("BIT_OR(t%d, t%d)\n", expr->left()->num(), expr->right()->num());
}
EmitBitOr();
} }

94
deps/v8/src/fast-codegen.h

@ -42,7 +42,7 @@ class FastCodeGenSyntaxChecker: public AstVisitor {
: info_(NULL), has_supported_syntax_(true) { : info_(NULL), has_supported_syntax_(true) {
} }
void Check(FunctionLiteral* fun, CompilationInfo* info); void Check(CompilationInfo* info);
CompilationInfo* info() { return info_; } CompilationInfo* info() { return info_; }
bool has_supported_syntax() { return has_supported_syntax_; } bool has_supported_syntax() { return has_supported_syntax_; }
@ -65,62 +65,86 @@ class FastCodeGenSyntaxChecker: public AstVisitor {
class FastCodeGenerator: public AstVisitor { class FastCodeGenerator: public AstVisitor {
public: public:
FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval) explicit FastCodeGenerator(MacroAssembler* masm)
: masm_(masm), : masm_(masm), info_(NULL), destination_(no_reg), smi_bits_(0) {
script_(script),
is_eval_(is_eval),
function_(NULL),
info_(NULL) {
} }
static Handle<Code> MakeCode(FunctionLiteral* fun, static Handle<Code> MakeCode(CompilationInfo* info);
Handle<Script> script,
bool is_eval,
CompilationInfo* info);
void Generate(FunctionLiteral* fun, CompilationInfo* info); void Generate(CompilationInfo* compilation_info);
private: private:
MacroAssembler* masm() { return masm_; } MacroAssembler* masm() { return masm_; }
FunctionLiteral* function() { return function_; } CompilationInfo* info() { return info_; }
Label* bailout() { return &bailout_; } Label* bailout() { return &bailout_; }
bool has_receiver() { return !info_->receiver().is_null(); } Register destination() { return destination_; }
Handle<Object> receiver() { return info_->receiver(); } void set_destination(Register reg) { destination_ = reg; }
bool has_this_properties() { return info_->has_this_properties(); }
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return info_->scope(); }
// Platform-specific fixed registers, all guaranteed distinct.
Register accumulator0();
Register accumulator1();
Register scratch0();
Register scratch1();
Register receiver_reg();
Register context_reg();
Register other_accumulator(Register reg) {
ASSERT(reg.is(accumulator0()) || reg.is(accumulator1()));
return (reg.is(accumulator0())) ? accumulator1() : accumulator0();
}
// Flags are true if the respective register is statically known to hold a
// smi. We do not track every register, only the accumulator registers.
bool is_smi(Register reg) {
ASSERT(!reg.is(no_reg));
return (smi_bits_ & reg.bit()) != 0;
}
void set_as_smi(Register reg) {
ASSERT(!reg.is(no_reg));
smi_bits_ = smi_bits_ | reg.bit();
}
void clear_as_smi(Register reg) {
ASSERT(!reg.is(no_reg));
smi_bits_ = smi_bits_ & ~reg.bit();
}
// AST node visit functions. // AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node); #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT) AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT #undef DECLARE_VISIT
// Emit code to load the receiver from the stack into a given register. // Emit code to load the receiver from the stack into receiver_reg.
void EmitLoadReceiver(Register reg); void EmitLoadReceiver();
// Emit code to check that the receiver has the same map as the // Emit code to load a global variable directly from a global property
// compile-time receiver. Receiver is expected in {ia32-edx, x64-rdx, // cell into the destination register.
// arm-r1}. Emit a branch to the (single) bailout label if check fails. void EmitGlobalVariableLoad(Handle<Object> cell);
void EmitReceiverMapCheck();
// Emit code to load a global variable value into {is32-eax, x64-rax,
// arm-r0}. Register {ia32-edx, x64-rdx, arm-r1} is preserved if it is
// holding the receiver and {is32-ecx, x64-rcx, arm-r2} is always
// clobbered.
void EmitGlobalVariableLoad(Handle<String> name);
// Emit a store to an own property of this. The stored value is expected // Emit a store to an own property of this. The stored value is expected
// in {ia32-eax, x64-rax, arm-r0} and the receiver in {is32-edx, x64-rdx, // in accumulator0 and the receiver in receiver_reg. The receiver
// arm-r1}. Both are preserve. // register is preserved and the result (the stored value) is left in the
// destination register.
void EmitThisPropertyStore(Handle<String> name); void EmitThisPropertyStore(Handle<String> name);
MacroAssembler* masm_; // Emit a load from an own property of this. The receiver is expected in
Handle<Script> script_; // receiver_reg. The receiver register is preserved and the result is
bool is_eval_; // left in the destination register.
void EmitThisPropertyLoad(Handle<String> name);
FunctionLiteral* function_; // Emit a bitwise or operation. The left operand is in accumulator1 and
CompilationInfo* info_; // the right is in accumulator0. The result should be left in the
// destination register.
void EmitBitOr();
MacroAssembler* masm_;
CompilationInfo* info_;
Label bailout_; Label bailout_;
Register destination_;
uint32_t smi_bits_;
DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator); DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
}; };

4
deps/v8/src/flag-definitions.h

@ -116,6 +116,8 @@ DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)") "enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, true, DEFINE_bool(enable_vfp3, true,
"enable use of VFP3 instructions if available (ARM only)") "enable use of VFP3 instructions if available (ARM only)")
DEFINE_bool(enable_armv7, true,
"enable use of ARMv7 instructions if available (ARM only)")
// bootstrapper.cc // bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object") DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
@ -218,7 +220,7 @@ DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
// rewriter.cc // rewriter.cc
DEFINE_bool(optimize_ast, true, "optimize the ast") DEFINE_bool(optimize_ast, true, "optimize the ast")
// simulator-arm.cc // simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "trace simulator execution") DEFINE_bool(trace_sim, false, "trace simulator execution")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions") DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")

4
deps/v8/src/frame-element.cc

@ -32,10 +32,6 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
// -------------------------------------------------------------------------
// FrameElement implementation.
FrameElement::ZoneObjectList* FrameElement::ConstantList() { FrameElement::ZoneObjectList* FrameElement::ConstantList() {
static ZoneObjectList list(10); static ZoneObjectList list(10);
return &list; return &list;

43
deps/v8/src/frame-element.h

@ -28,7 +28,8 @@
#ifndef V8_FRAME_ELEMENT_H_ #ifndef V8_FRAME_ELEMENT_H_
#define V8_FRAME_ELEMENT_H_ #define V8_FRAME_ELEMENT_H_
#include "register-allocator-inl.h" #include "number-info.h"
#include "macro-assembler.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -52,11 +53,28 @@ class FrameElement BASE_EMBEDDED {
SYNCED SYNCED
}; };
inline NumberInfo::Type number_info() {
// Copied elements do not have number info. Instead
// we have to inspect their backing element in the frame.
ASSERT(!is_copy());
if (!is_constant()) return NumberInfoField::decode(value_);
Handle<Object> value = handle();
if (value->IsSmi()) return NumberInfo::kSmi;
if (value->IsHeapNumber()) return NumberInfo::kHeapNumber;
return NumberInfo::kUnknown;
}
inline void set_number_info(NumberInfo::Type info) {
value_ = value_ & ~NumberInfoField::mask();
value_ = value_ | NumberInfoField::encode(info);
}
// The default constructor creates an invalid frame element. // The default constructor creates an invalid frame element.
FrameElement() { FrameElement() {
value_ = TypeField::encode(INVALID) value_ = TypeField::encode(INVALID)
| CopiedField::encode(false) | CopiedField::encode(false)
| SyncedField::encode(false) | SyncedField::encode(false)
| NumberInfoField::encode(NumberInfo::kUninitialized)
| DataField::encode(0); | DataField::encode(0);
} }
@ -67,15 +85,16 @@ class FrameElement BASE_EMBEDDED {
} }
// Factory function to construct an in-memory frame element. // Factory function to construct an in-memory frame element.
static FrameElement MemoryElement() { static FrameElement MemoryElement(NumberInfo::Type info) {
FrameElement result(MEMORY, no_reg, SYNCED); FrameElement result(MEMORY, no_reg, SYNCED, info);
return result; return result;
} }
// Factory function to construct an in-register frame element. // Factory function to construct an in-register frame element.
static FrameElement RegisterElement(Register reg, static FrameElement RegisterElement(Register reg,
SyncFlag is_synced) { SyncFlag is_synced,
return FrameElement(REGISTER, reg, is_synced); NumberInfo::Type info) {
return FrameElement(REGISTER, reg, is_synced, info);
} }
// Factory function to construct a frame element whose value is known at // Factory function to construct a frame element whose value is known at
@ -185,10 +204,14 @@ class FrameElement BASE_EMBEDDED {
}; };
// Used to construct memory and register elements. // Used to construct memory and register elements.
FrameElement(Type type, Register reg, SyncFlag is_synced) { FrameElement(Type type,
Register reg,
SyncFlag is_synced,
NumberInfo::Type info) {
value_ = TypeField::encode(type) value_ = TypeField::encode(type)
| CopiedField::encode(false) | CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED) | SyncedField::encode(is_synced != NOT_SYNCED)
| NumberInfoField::encode(info)
| DataField::encode(reg.code_ > 0 ? reg.code_ : 0); | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
} }
@ -197,6 +220,7 @@ class FrameElement BASE_EMBEDDED {
value_ = TypeField::encode(CONSTANT) value_ = TypeField::encode(CONSTANT)
| CopiedField::encode(false) | CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED) | SyncedField::encode(is_synced != NOT_SYNCED)
| NumberInfoField::encode(NumberInfo::kUninitialized)
| DataField::encode(ConstantList()->length()); | DataField::encode(ConstantList()->length());
ConstantList()->Add(value); ConstantList()->Add(value);
} }
@ -223,9 +247,10 @@ class FrameElement BASE_EMBEDDED {
uint32_t value_; uint32_t value_;
class TypeField: public BitField<Type, 0, 3> {}; class TypeField: public BitField<Type, 0, 3> {};
class CopiedField: public BitField<uint32_t, 3, 1> {}; class CopiedField: public BitField<bool, 3, 1> {};
class SyncedField: public BitField<uint32_t, 4, 1> {}; class SyncedField: public BitField<bool, 4, 1> {};
class DataField: public BitField<uint32_t, 5, 32 - 6> {}; class NumberInfoField: public BitField<NumberInfo::Type, 5, 3> {};
class DataField: public BitField<uint32_t, 8, 32 - 9> {};
friend class VirtualFrame; friend class VirtualFrame;
}; };

2
deps/v8/src/frames-inl.h

@ -36,6 +36,8 @@
#include "x64/frames-x64.h" #include "x64/frames-x64.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/frames-arm.h" #include "arm/frames-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/frames-mips.h"
#else #else
#error Unsupported target architecture. #error Unsupported target architecture.
#endif #endif

7
deps/v8/src/frames.cc

@ -408,12 +408,7 @@ Object*& ExitFrame::code_slot() const {
Code* ExitFrame::code() const { Code* ExitFrame::code() const {
Object* code = code_slot(); return Code::cast(code_slot());
if (code->IsSmi()) {
return Heap::debugger_statement_code();
} else {
return Code::cast(code);
}
} }

105
deps/v8/src/full-codegen.cc

@ -32,6 +32,7 @@
#include "full-codegen.h" #include "full-codegen.h"
#include "stub-cache.h" #include "stub-cache.h"
#include "debug.h" #include "debug.h"
#include "liveedit.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -439,24 +440,27 @@ void FullCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
#define __ ACCESS_MASM(masm()) #define __ ACCESS_MASM(masm())
Handle<Code> FullCodeGenerator::MakeCode(FunctionLiteral* fun, Handle<Code> FullCodeGenerator::MakeCode(CompilationInfo* info) {
Handle<Script> script, Handle<Script> script = info->script();
bool is_eval) {
if (!script->IsUndefined() && !script->source()->IsUndefined()) { if (!script->IsUndefined() && !script->source()->IsUndefined()) {
int len = String::cast(script->source())->length(); int len = String::cast(script->source())->length();
Counters::total_full_codegen_source_size.Increment(len); Counters::total_full_codegen_source_size.Increment(len);
} }
CodeGenerator::MakeCodePrologue(fun); CodeGenerator::MakeCodePrologue(info);
const int kInitialBufferSize = 4 * KB; const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize); MacroAssembler masm(NULL, kInitialBufferSize);
FullCodeGenerator cgen(&masm, script, is_eval); LiveEditFunctionTracker live_edit_tracker(info->function());
cgen.Generate(fun, PRIMARY);
FullCodeGenerator cgen(&masm);
cgen.Generate(info, PRIMARY);
if (cgen.HasStackOverflow()) { if (cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception()); ASSERT(!Top::has_pending_exception());
return Handle<Code>::null(); return Handle<Code>::null();
} }
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP); Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script); Handle<Code> result = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
live_edit_tracker.RecordFunctionCode(result);
return result;
} }
@ -467,7 +471,7 @@ int FullCodeGenerator::SlotOffset(Slot* slot) {
// Adjust by a (parameter or local) base offset. // Adjust by a (parameter or local) base offset.
switch (slot->type()) { switch (slot->type()) {
case Slot::PARAMETER: case Slot::PARAMETER:
offset += (function_->scope()->num_parameters() + 1) * kPointerSize; offset += (scope()->num_parameters() + 1) * kPointerSize;
break; break;
case Slot::LOCAL: case Slot::LOCAL:
offset += JavaScriptFrameConstants::kLocal0Offset; offset += JavaScriptFrameConstants::kLocal0Offset;
@ -520,7 +524,7 @@ void FullCodeGenerator::VisitDeclarations(
} }
} else { } else {
Handle<JSFunction> function = Handle<JSFunction> function =
Compiler::BuildBoilerplate(decl->fun(), script_, this); Compiler::BuildBoilerplate(decl->fun(), script(), this);
// Check for stack-overflow exception. // Check for stack-overflow exception.
if (HasStackOverflow()) return; if (HasStackOverflow()) return;
array->set(j++, *function); array->set(j++, *function);
@ -987,8 +991,7 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
Comment cmnt(masm_, "[ DebuggerStatement"); Comment cmnt(masm_, "[ DebuggerStatement");
SetStatementPosition(stmt); SetStatementPosition(stmt);
DebuggerStatementStub ces; __ DebugBreak();
__ CallStub(&ces);
// Ignore the return value. // Ignore the return value.
#endif #endif
} }
@ -1033,86 +1036,6 @@ void FullCodeGenerator::VisitLiteral(Literal* expr) {
} }
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
Comment cmnt(masm_, "[ Assignment");
ASSERT(expr->op() != Token::INIT_CONST);
// Left-hand side can only be a property, a global or a (parameter or local)
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->target()->AsProperty();
if (prop != NULL) {
assign_type =
(prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
}
// Evaluate LHS expression.
switch (assign_type) {
case VARIABLE:
// Nothing to do here.
break;
case NAMED_PROPERTY:
VisitForValue(prop->obj(), kStack);
break;
case KEYED_PROPERTY:
VisitForValue(prop->obj(), kStack);
VisitForValue(prop->key(), kStack);
break;
}
// If we have a compound assignment: Get value of LHS expression and
// store in on top of the stack.
if (expr->is_compound()) {
Location saved_location = location_;
location_ = kStack;
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
Expression::kValue);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(prop);
__ push(result_register());
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(prop);
__ push(result_register());
break;
}
location_ = saved_location;
}
// Evaluate RHS expression.
Expression* rhs = expr->value();
VisitForValue(rhs, kAccumulator);
// If we have a compound assignment: Apply operator.
if (expr->is_compound()) {
Location saved_location = location_;
location_ = kAccumulator;
EmitBinaryOp(expr->binary_op(), Expression::kValue);
location_ = saved_location;
}
// Record source position before possible IC call.
SetSourcePosition(expr->position());
// Store the value.
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
context_);
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
}
}
void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) { void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
// Call runtime routine to allocate the catch extension object and // Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable. // assign the exception value to the catch variable.

23
deps/v8/src/full-codegen.h

@ -68,11 +68,9 @@ class FullCodeGenerator: public AstVisitor {
SECONDARY SECONDARY
}; };
FullCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval) explicit FullCodeGenerator(MacroAssembler* masm)
: masm_(masm), : masm_(masm),
script_(script), info_(NULL),
is_eval_(is_eval),
function_(NULL),
nesting_stack_(NULL), nesting_stack_(NULL),
loop_depth_(0), loop_depth_(0),
location_(kStack), location_(kStack),
@ -80,11 +78,9 @@ class FullCodeGenerator: public AstVisitor {
false_label_(NULL) { false_label_(NULL) {
} }
static Handle<Code> MakeCode(FunctionLiteral* fun, static Handle<Code> MakeCode(CompilationInfo* info);
Handle<Script> script,
bool is_eval);
void Generate(FunctionLiteral* fun, Mode mode); void Generate(CompilationInfo* info, Mode mode);
private: private:
class Breakable; class Breakable;
@ -408,6 +404,12 @@ class FullCodeGenerator: public AstVisitor {
} }
MacroAssembler* masm() { return masm_; } MacroAssembler* masm() { return masm_; }
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return info_->scope(); }
static Register result_register(); static Register result_register();
static Register context_register(); static Register context_register();
@ -427,10 +429,7 @@ class FullCodeGenerator: public AstVisitor {
void EmitLogicalOperation(BinaryOperation* expr); void EmitLogicalOperation(BinaryOperation* expr);
MacroAssembler* masm_; MacroAssembler* masm_;
Handle<Script> script_; CompilationInfo* info_;
bool is_eval_;
FunctionLiteral* function_;
Label return_label_; Label return_label_;
NestedStatement* nesting_stack_; NestedStatement* nesting_stack_;

5
deps/v8/src/globals.h

@ -46,6 +46,9 @@ namespace internal {
#elif defined(__ARMEL__) #elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1 #define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1 #define V8_HOST_ARCH_32_BIT 1
#elif defined(_MIPS_ARCH_MIPS32R2)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else #else
#error Your host architecture was not detected as supported by v8 #error Your host architecture was not detected as supported by v8
#endif #endif
@ -53,6 +56,7 @@ namespace internal {
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32) #if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
#define V8_TARGET_CAN_READ_UNALIGNED 1 #define V8_TARGET_CAN_READ_UNALIGNED 1
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#elif V8_TARGET_ARCH_MIPS
#else #else
#error Your target architecture is not supported by v8 #error Your target architecture is not supported by v8
#endif #endif
@ -608,6 +612,7 @@ enum CpuFeature { SSE3 = 32, // x86
RDTSC = 4, // x86 RDTSC = 4, // x86
CPUID = 10, // x86 CPUID = 10, // x86
VFP3 = 1, // ARM VFP3 = 1, // ARM
ARMv7 = 2, // ARM
SAHF = 0}; // x86 SAHF = 0}; // x86
} } // namespace v8::internal } } // namespace v8::internal

47
deps/v8/src/handles.cc

@ -300,6 +300,12 @@ Handle<Object> GetPrototype(Handle<Object> obj) {
} }
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
const bool skip_hidden_prototypes = false;
CALL_HEAP_FUNCTION(obj->SetPrototype(*value, skip_hidden_prototypes), Object);
}
Handle<Object> GetHiddenProperties(Handle<JSObject> obj, Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
bool create_if_needed) { bool create_if_needed) {
Object* holder = obj->BypassGlobalProxy(); Object* holder = obj->BypassGlobalProxy();
@ -477,25 +483,25 @@ void InitScriptLineEnds(Handle<Script> script) {
int GetScriptLineNumber(Handle<Script> script, int code_pos) { int GetScriptLineNumber(Handle<Script> script, int code_pos) {
InitScriptLineEnds(script); InitScriptLineEnds(script);
AssertNoAllocation no_allocation; AssertNoAllocation no_allocation;
FixedArray* line_ends_array = FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
FixedArray::cast(script->line_ends());
const int line_ends_len = line_ends_array->length(); const int line_ends_len = line_ends_array->length();
int line = -1; if (!line_ends_len)
if (line_ends_len > 0 && return -1;
code_pos <= (Smi::cast(line_ends_array->get(0)))->value()) {
line = 0; if ((Smi::cast(line_ends_array->get(0)))->value() >= code_pos)
} else { return script->line_offset()->value();
for (int i = 1; i < line_ends_len; ++i) {
if ((Smi::cast(line_ends_array->get(i - 1)))->value() < code_pos && int left = 0;
code_pos <= (Smi::cast(line_ends_array->get(i)))->value()) { int right = line_ends_len;
line = i; while (int half = (right - left) / 2) {
break; if ((Smi::cast(line_ends_array->get(left + half)))->value() > code_pos) {
} right -= half;
} else {
left += half;
} }
} }
return right + script->line_offset()->value();
return line != -1 ? line + script->line_offset()->value() : line;
} }
@ -686,7 +692,7 @@ static bool CompileLazyHelper(CompilationInfo* info,
bool CompileLazyShared(Handle<SharedFunctionInfo> shared, bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag) { ClearExceptionFlag flag) {
CompilationInfo info(shared, Handle<Object>::null(), 0); CompilationInfo info(shared);
return CompileLazyHelper(&info, flag); return CompileLazyHelper(&info, flag);
} }
@ -694,8 +700,7 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
bool CompileLazy(Handle<JSFunction> function, bool CompileLazy(Handle<JSFunction> function,
Handle<Object> receiver, Handle<Object> receiver,
ClearExceptionFlag flag) { ClearExceptionFlag flag) {
Handle<SharedFunctionInfo> shared(function->shared()); CompilationInfo info(function, 0, receiver);
CompilationInfo info(shared, receiver, 0);
bool result = CompileLazyHelper(&info, flag); bool result = CompileLazyHelper(&info, flag);
LOG(FunctionCreateEvent(*function)); LOG(FunctionCreateEvent(*function));
return result; return result;
@ -705,8 +710,7 @@ bool CompileLazy(Handle<JSFunction> function,
bool CompileLazyInLoop(Handle<JSFunction> function, bool CompileLazyInLoop(Handle<JSFunction> function,
Handle<Object> receiver, Handle<Object> receiver,
ClearExceptionFlag flag) { ClearExceptionFlag flag) {
Handle<SharedFunctionInfo> shared(function->shared()); CompilationInfo info(function, 1, receiver);
CompilationInfo info(shared, receiver, 1);
bool result = CompileLazyHelper(&info, flag); bool result = CompileLazyHelper(&info, flag);
LOG(FunctionCreateEvent(*function)); LOG(FunctionCreateEvent(*function));
return result; return result;
@ -766,7 +770,8 @@ void LoadLazy(Handle<JSObject> obj, bool* pending_exception) {
Handle<String> script_name = Factory::NewStringFromAscii(name); Handle<String> script_name = Factory::NewStringFromAscii(name);
bool allow_natives_syntax = FLAG_allow_natives_syntax; bool allow_natives_syntax = FLAG_allow_natives_syntax;
FLAG_allow_natives_syntax = true; FLAG_allow_natives_syntax = true;
boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL); boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL,
Handle<String>::null());
FLAG_allow_natives_syntax = allow_natives_syntax; FLAG_allow_natives_syntax = allow_natives_syntax;
// If the compilation failed (possibly due to stack overflows), we // If the compilation failed (possibly due to stack overflows), we
// should never enter the result in the natives cache. Instead we // should never enter the result in the natives cache. Instead we

2
deps/v8/src/handles.h

@ -240,6 +240,8 @@ Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<Object> GetPrototype(Handle<Object> obj); Handle<Object> GetPrototype(Handle<Object> obj);
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
// Return the object's hidden properties object. If the object has no hidden // Return the object's hidden properties object. If the object has no hidden
// properties and create_if_needed is true, then a new hidden property object // properties and create_if_needed is true, then a new hidden property object
// will be allocated. Otherwise the Heap::undefined_value is returned. // will be allocated. Otherwise the Heap::undefined_value is returned.

19
deps/v8/src/heap.cc

@ -1498,12 +1498,6 @@ void Heap::CreateRegExpCEntryStub() {
#endif #endif
void Heap::CreateCEntryDebugBreakStub() {
DebuggerStatementStub stub;
set_debugger_statement_code(*stub.GetCode());
}
void Heap::CreateJSEntryStub() { void Heap::CreateJSEntryStub() {
JSEntryStub stub; JSEntryStub stub;
set_js_entry_code(*stub.GetCode()); set_js_entry_code(*stub.GetCode());
@ -1531,7 +1525,6 @@ void Heap::CreateFixedStubs() {
// } // }
// To workaround the problem, make separate functions without inlining. // To workaround the problem, make separate functions without inlining.
Heap::CreateCEntryStub(); Heap::CreateCEntryStub();
Heap::CreateCEntryDebugBreakStub();
Heap::CreateJSEntryStub(); Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub(); Heap::CreateJSConstructEntryStub();
#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
@ -1774,6 +1767,7 @@ Object* Heap::SmiOrNumberFromDouble(double value,
Object* Heap::NumberToString(Object* number) { Object* Heap::NumberToString(Object* number) {
Counters::number_to_string_runtime.Increment();
Object* cached = GetNumberStringCache(number); Object* cached = GetNumberStringCache(number);
if (cached != undefined_value()) { if (cached != undefined_value()) {
return cached; return cached;
@ -2389,12 +2383,13 @@ Object* Heap::AllocateInitialMap(JSFunction* fun) {
map->set_unused_property_fields(in_object_properties); map->set_unused_property_fields(in_object_properties);
map->set_prototype(prototype); map->set_prototype(prototype);
// If the function has only simple this property assignments add field // If the function has only simple this property assignments add
// descriptors for these to the initial map as the object cannot be // field descriptors for these to the initial map as the object
// constructed without having these properties. // cannot be constructed without having these properties. Guard by
// the inline_new flag so we only change the map if we generate a
// specialized construct stub.
ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields); ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
if (fun->shared()->has_only_simple_this_property_assignments() && if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
fun->shared()->this_property_assignments_count() > 0) {
int count = fun->shared()->this_property_assignments_count(); int count = fun->shared()->this_property_assignments_count();
if (count > in_object_properties) { if (count > in_object_properties) {
count = in_object_properties; count = in_object_properties;

2
deps/v8/src/heap.h

@ -101,7 +101,6 @@ namespace internal {
V(Code, js_entry_code, JsEntryCode) \ V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \ V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(Code, c_entry_code, CEntryCode) \ V(Code, c_entry_code, CEntryCode) \
V(Code, debugger_statement_code, DebuggerStatementCode) \
V(FixedArray, number_string_cache, NumberStringCache) \ V(FixedArray, number_string_cache, NumberStringCache) \
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, natives_source_cache, NativesSourceCache) \ V(FixedArray, natives_source_cache, NativesSourceCache) \
@ -1046,7 +1045,6 @@ class Heap : public AllStatic {
// These four Create*EntryStub functions are here because of a gcc-4.4 bug // These four Create*EntryStub functions are here because of a gcc-4.4 bug
// that assigns wrong vtable entries. // that assigns wrong vtable entries.
static void CreateCEntryStub(); static void CreateCEntryStub();
static void CreateCEntryDebugBreakStub();
static void CreateJSEntryStub(); static void CreateJSEntryStub();
static void CreateJSConstructEntryStub(); static void CreateJSConstructEntryStub();
static void CreateRegExpCEntryStub(); static void CreateRegExpCEntryStub();

59
deps/v8/src/ia32/assembler-ia32.cc

@ -267,7 +267,7 @@ bool Operand::is_reg(Register reg) const {
} }
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// Implementation of Assembler // Implementation of Assembler.
// Emit a single byte. Must always be inlined. // Emit a single byte. Must always be inlined.
#define EMIT(x) \ #define EMIT(x) \
@ -278,12 +278,12 @@ bool Operand::is_reg(Register reg) const {
static void InitCoverageLog(); static void InitCoverageLog();
#endif #endif
// spare_buffer_ // Spare buffer.
byte* Assembler::spare_buffer_ = NULL; byte* Assembler::spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size) { Assembler::Assembler(void* buffer, int buffer_size) {
if (buffer == NULL) { if (buffer == NULL) {
// do our own buffer management // Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) { if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize; buffer_size = kMinimalBufferSize;
@ -300,7 +300,7 @@ Assembler::Assembler(void* buffer, int buffer_size) {
buffer_size_ = buffer_size; buffer_size_ = buffer_size;
own_buffer_ = true; own_buffer_ = true;
} else { } else {
// use externally provided buffer instead // Use externally provided buffer instead.
ASSERT(buffer_size > 0); ASSERT(buffer_size > 0);
buffer_ = static_cast<byte*>(buffer); buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size; buffer_size_ = buffer_size;
@ -316,7 +316,7 @@ Assembler::Assembler(void* buffer, int buffer_size) {
} }
#endif #endif
// setup buffer pointers // Setup buffer pointers.
ASSERT(buffer_ != NULL); ASSERT(buffer_ != NULL);
pc_ = buffer_; pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@ -344,11 +344,10 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) { void Assembler::GetCode(CodeDesc* desc) {
// finalize code // Finalize code (at this point overflow() may be true, but the gap ensures
// (at this point overflow() may be true, but the gap ensures that // that we are still not overlapping instructions and relocation info).
// we are still not overlapping instructions and relocation info) ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap // Setup code descriptor.
// setup desc
desc->buffer = buffer_; desc->buffer = buffer_;
desc->buffer_size = buffer_size_; desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset(); desc->instr_size = pc_offset();
@ -435,7 +434,7 @@ void Assembler::push(const Operand& src) {
void Assembler::pop(Register dst) { void Assembler::pop(Register dst) {
ASSERT(reloc_info_writer.last_pc() != NULL); ASSERT(reloc_info_writer.last_pc() != NULL);
if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) { if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
// (last_pc_ != NULL) is rolled into the above check // (last_pc_ != NULL) is rolled into the above check.
// If a last_pc_ is set, we need to make sure that there has not been any // If a last_pc_ is set, we need to make sure that there has not been any
// relocation information generated between the last instruction and this // relocation information generated between the last instruction and this
// pop instruction. // pop instruction.
@ -461,7 +460,7 @@ void Assembler::pop(Register dst) {
return; return;
} else if (instr == 0xff) { // push of an operand, convert to a move } else if (instr == 0xff) { // push of an operand, convert to a move
byte op1 = last_pc_[1]; byte op1 = last_pc_[1];
// Check if the operation is really a push // Check if the operation is really a push.
if ((op1 & 0x38) == (6 << 3)) { if ((op1 & 0x38) == (6 << 3)) {
op1 = (op1 & ~0x38) | static_cast<byte>(dst.code() << 3); op1 = (op1 & ~0x38) | static_cast<byte>(dst.code() << 3);
last_pc_[0] = 0x8b; last_pc_[0] = 0x8b;
@ -747,7 +746,7 @@ void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(CMOV)); ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
// Opcode: 0f 40 + cc /r // Opcode: 0f 40 + cc /r.
EMIT(0x0F); EMIT(0x0F);
EMIT(0x40 + cc); EMIT(0x40 + cc);
emit_operand(dst, src); emit_operand(dst, src);
@ -765,7 +764,7 @@ void Assembler::rep_movs() {
void Assembler::xchg(Register dst, Register src) { void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
if (src.is(eax) || dst.is(eax)) { // Single-byte encoding if (src.is(eax) || dst.is(eax)) { // Single-byte encoding.
EMIT(0x90 | (src.is(eax) ? dst.code() : src.code())); EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
} else { } else {
EMIT(0x87); EMIT(0x87);
@ -1434,7 +1433,7 @@ void Assembler::bind_to(Label* L, int pos) {
if (disp.type() == Displacement::UNCONDITIONAL_JUMP) { if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
ASSERT(byte_at(fixup_pos - 1) == 0xE9); // jmp expected ASSERT(byte_at(fixup_pos - 1) == 0xE9); // jmp expected
} }
// relative address, relative to point after address // Relative address, relative to point after address.
int imm32 = pos - (fixup_pos + sizeof(int32_t)); int imm32 = pos - (fixup_pos + sizeof(int32_t));
long_at_put(fixup_pos, imm32); long_at_put(fixup_pos, imm32);
} }
@ -1449,7 +1448,7 @@ void Assembler::link_to(Label* L, Label* appendix) {
last_pc_ = NULL; last_pc_ = NULL;
if (appendix->is_linked()) { if (appendix->is_linked()) {
if (L->is_linked()) { if (L->is_linked()) {
// append appendix to L's list // Append appendix to L's list.
Label p; Label p;
Label q = *L; Label q = *L;
do { do {
@ -1462,7 +1461,7 @@ void Assembler::link_to(Label* L, Label* appendix) {
disp_at_put(&p, disp); disp_at_put(&p, disp);
p.Unuse(); // to avoid assertion failure in ~Label p.Unuse(); // to avoid assertion failure in ~Label
} else { } else {
// L is empty, simply use appendix // L is empty, simply use appendix.
*L = *appendix; *L = *appendix;
} }
} }
@ -1485,11 +1484,11 @@ void Assembler::call(Label* L) {
const int long_size = 5; const int long_size = 5;
int offs = L->pos() - pc_offset(); int offs = L->pos() - pc_offset();
ASSERT(offs <= 0); ASSERT(offs <= 0);
// 1110 1000 #32-bit disp // 1110 1000 #32-bit disp.
EMIT(0xE8); EMIT(0xE8);
emit(offs - long_size); emit(offs - long_size);
} else { } else {
// 1110 1000 #32-bit disp // 1110 1000 #32-bit disp.
EMIT(0xE8); EMIT(0xE8);
emit_disp(L, Displacement::OTHER); emit_disp(L, Displacement::OTHER);
} }
@ -1532,16 +1531,16 @@ void Assembler::jmp(Label* L) {
int offs = L->pos() - pc_offset(); int offs = L->pos() - pc_offset();
ASSERT(offs <= 0); ASSERT(offs <= 0);
if (is_int8(offs - short_size)) { if (is_int8(offs - short_size)) {
// 1110 1011 #8-bit disp // 1110 1011 #8-bit disp.
EMIT(0xEB); EMIT(0xEB);
EMIT((offs - short_size) & 0xFF); EMIT((offs - short_size) & 0xFF);
} else { } else {
// 1110 1001 #32-bit disp // 1110 1001 #32-bit disp.
EMIT(0xE9); EMIT(0xE9);
emit(offs - long_size); emit(offs - long_size);
} }
} else { } else {
// 1110 1001 #32-bit disp // 1110 1001 #32-bit disp.
EMIT(0xE9); EMIT(0xE9);
emit_disp(L, Displacement::UNCONDITIONAL_JUMP); emit_disp(L, Displacement::UNCONDITIONAL_JUMP);
} }
@ -1611,7 +1610,7 @@ void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint) {
last_pc_ = pc_; last_pc_ = pc_;
ASSERT((0 <= cc) && (cc < 16)); ASSERT((0 <= cc) && (cc < 16));
if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint); if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
// 0000 1111 1000 tttn #32-bit disp // 0000 1111 1000 tttn #32-bit disp.
EMIT(0x0F); EMIT(0x0F);
EMIT(0x80 | cc); EMIT(0x80 | cc);
emit(entry - (pc_ + sizeof(int32_t)), rmode); emit(entry - (pc_ + sizeof(int32_t)), rmode);
@ -1629,7 +1628,7 @@ void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
} }
// FPU instructions // FPU instructions.
void Assembler::fld(int i) { void Assembler::fld(int i) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
@ -2225,10 +2224,10 @@ void Assembler::WriteRecordedPositions() {
void Assembler::GrowBuffer() { void Assembler::GrowBuffer() {
ASSERT(overflow()); // should not call this otherwise ASSERT(overflow());
if (!own_buffer_) FATAL("external code buffer is too small"); if (!own_buffer_) FATAL("external code buffer is too small");
// compute new buffer size // Compute new buffer size.
CodeDesc desc; // the new buffer CodeDesc desc; // the new buffer
if (buffer_size_ < 4*KB) { if (buffer_size_ < 4*KB) {
desc.buffer_size = 4*KB; desc.buffer_size = 4*KB;
@ -2242,7 +2241,7 @@ void Assembler::GrowBuffer() {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer"); V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
} }
// setup new buffer // Setup new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size); desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset(); desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos()); desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
@ -2253,14 +2252,14 @@ void Assembler::GrowBuffer() {
memset(desc.buffer, 0xCC, desc.buffer_size); memset(desc.buffer, 0xCC, desc.buffer_size);
#endif #endif
// copy the data // Copy the data.
int pc_delta = desc.buffer - buffer_; int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_); int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
memmove(desc.buffer, buffer_, desc.instr_size); memmove(desc.buffer, buffer_, desc.instr_size);
memmove(rc_delta + reloc_info_writer.pos(), memmove(rc_delta + reloc_info_writer.pos(),
reloc_info_writer.pos(), desc.reloc_size); reloc_info_writer.pos(), desc.reloc_size);
// switch buffers // Switch buffers.
if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) { if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
spare_buffer_ = buffer_; spare_buffer_ = buffer_;
} else { } else {
@ -2275,7 +2274,7 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta); reloc_info_writer.last_pc() + pc_delta);
// relocate runtime entries // Relocate runtime entries.
for (RelocIterator it(desc); !it.done(); it.next()) { for (RelocIterator it(desc); !it.done(); it.next()) {
RelocInfo::Mode rmode = it.rinfo()->rmode(); RelocInfo::Mode rmode = it.rinfo()->rmode();
if (rmode == RelocInfo::RUNTIME_ENTRY) { if (rmode == RelocInfo::RUNTIME_ENTRY) {

5
deps/v8/src/ia32/assembler-ia32.h

@ -77,7 +77,7 @@ struct Register {
return 1 << code_; return 1 << code_;
} }
// (unfortunately we can't make this private in a struct) // Unfortunately we can't make this private in a struct.
int code_; int code_;
}; };
@ -231,7 +231,8 @@ enum ScaleFactor {
times_8 = 3, times_8 = 3,
times_int_size = times_4, times_int_size = times_4,
times_half_pointer_size = times_2, times_half_pointer_size = times_2,
times_pointer_size = times_4 times_pointer_size = times_4,
times_twice_pointer_size = times_8
}; };

135
deps/v8/src/ia32/builtins-ia32.cc

@ -93,7 +93,10 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// edi: called object // edi: called object
// eax: number of arguments // eax: number of arguments
__ bind(&non_function_call); __ bind(&non_function_call);
// CALL_NON_FUNCTION expects the non-function constructor as receiver
// (instead of the original receiver from the call site). The receiver is
// stack element argc+1.
__ mov(Operand(esp, eax, times_4, kPointerSize), edi);
// Set expected number of arguments to zero (not changing eax). // Set expected number of arguments to zero (not changing eax).
__ Set(ebx, Immediate(0)); __ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
@ -437,33 +440,26 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&done); __ bind(&done);
} }
// 2. Get the function to call from the stack. // 2. Get the function to call (passed as receiver) from the stack, check
{ Label done, non_function, function; // if it is a function.
// +1 ~ return address. Label non_function;
__ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize)); // 1 ~ return address.
__ test(edi, Immediate(kSmiTagMask)); __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
__ j(zero, &non_function, not_taken); __ test(edi, Immediate(kSmiTagMask));
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); __ j(zero, &non_function, not_taken);
__ j(equal, &function, taken); __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function, not_taken);
// Non-function called: Clear the function to force exception.
__ bind(&non_function);
__ xor_(edi, Operand(edi));
__ jmp(&done);
// Function called: Change context eagerly to get the right global object.
__ bind(&function);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
__ bind(&done);
}
// 3. Make sure first argument is an object; convert if necessary. // 3a. Patch the first argument if necessary when calling a function.
{ Label call_to_object, use_global_receiver, patch_receiver, done; Label shift_arguments;
__ mov(ebx, Operand(esp, eax, times_4, 0)); { Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
__ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
__ test(ebx, Immediate(kSmiTagMask)); __ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &call_to_object); __ j(zero, &convert_to_object);
__ cmp(ebx, Factory::null_value()); __ cmp(ebx, Factory::null_value());
__ j(equal, &use_global_receiver); __ j(equal, &use_global_receiver);
@ -473,31 +469,28 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset)); __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE); __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(less, &call_to_object); __ j(below, &convert_to_object);
__ cmp(ecx, LAST_JS_OBJECT_TYPE); __ cmp(ecx, LAST_JS_OBJECT_TYPE);
__ j(less_equal, &done); __ j(below_equal, &shift_arguments);
__ bind(&call_to_object); __ bind(&convert_to_object);
__ EnterInternalFrame(); // preserves eax, ebx, edi __ EnterInternalFrame(); // In order to preserve argument count.
// Store the arguments count on the stack (smi tagged).
__ SmiTag(eax); __ SmiTag(eax);
__ push(eax); __ push(eax);
__ push(edi); // save edi across the call
__ push(ebx); __ push(ebx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ mov(ebx, eax); __ mov(ebx, eax);
__ pop(edi); // restore edi after the call
// Get the arguments count and untag it.
__ pop(eax); __ pop(eax);
__ SmiUntag(eax); __ SmiUntag(eax);
__ LeaveInternalFrame(); __ LeaveInternalFrame();
// Restore the function to edi.
__ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
__ jmp(&patch_receiver); __ jmp(&patch_receiver);
// Use the global receiver object from the called function as the receiver. // Use the global receiver object from the called function as the
// receiver.
__ bind(&use_global_receiver); __ bind(&use_global_receiver);
const int kGlobalIndex = const int kGlobalIndex =
Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
@ -509,50 +502,55 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&patch_receiver); __ bind(&patch_receiver);
__ mov(Operand(esp, eax, times_4, 0), ebx); __ mov(Operand(esp, eax, times_4, 0), ebx);
__ bind(&done); __ jmp(&shift_arguments);
} }
// 4. Check that the function really is a function. // 3b. Patch the first argument when calling a non-function. The
{ Label done; // CALL_NON_FUNCTION builtin expects the non-function callee as
__ test(edi, Operand(edi)); // receiver, so overwrite the first argument which will ultimately
__ j(not_zero, &done, taken); // become the receiver.
__ xor_(ebx, Operand(ebx)); __ bind(&non_function);
// CALL_NON_FUNCTION will expect to find the non-function callee on the __ mov(Operand(esp, eax, times_4, 0), edi);
// expression stack of the caller. Transfer it from receiver to the // Clear edi to indicate a non-function being called.
// caller's expression stack (and make the first argument the receiver __ xor_(edi, Operand(edi));
// for CALL_NON_FUNCTION) by decrementing the argument count.
__ dec(eax); // 4. Shift arguments and return address one slot down on the stack
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION); // (overwriting the original receiver). Adjust argument count to make
__ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)), // the original first argument the new receiver.
RelocInfo::CODE_TARGET); __ bind(&shift_arguments);
__ bind(&done);
}
// 5. Shift arguments and return address one slot down on the stack
// (overwriting the receiver).
{ Label loop; { Label loop;
__ mov(ecx, eax); __ mov(ecx, eax);
__ bind(&loop); __ bind(&loop);
__ mov(ebx, Operand(esp, ecx, times_4, 0)); __ mov(ebx, Operand(esp, ecx, times_4, 0));
__ mov(Operand(esp, ecx, times_4, kPointerSize), ebx); __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
__ dec(ecx); __ dec(ecx);
__ j(not_sign, &loop); __ j(not_sign, &loop); // While non-negative (to copy return address).
__ pop(ebx); // Discard copy of return address. __ pop(ebx); // Discard copy of return address.
__ dec(eax); // One fewer argument (first argument is new receiver). __ dec(eax); // One fewer argument (first argument is new receiver).
} }
// 6. Get the code to call from the function and check that the number of // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
// expected arguments matches what we're providing. { Label function;
{ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); __ test(edi, Operand(edi));
__ mov(ebx, __ j(not_zero, &function, taken);
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); __ xor_(ebx, Operand(ebx));
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
__ lea(edx, FieldOperand(edx, Code::kHeaderSize)); __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
__ cmp(eax, Operand(ebx)); RelocInfo::CODE_TARGET);
__ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline))); __ bind(&function);
} }
// 7. Jump (tail-call) to the code in register edx without checking arguments. // 5b. Get the code to call from the function and check that the number of
// expected arguments matches what we're providing. If so, jump
// (tail-call) to the code in register edx without checking arguments.
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
__ cmp(eax, Operand(ebx));
__ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
ParameterCount expected(0); ParameterCount expected(0);
__ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION); __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION);
} }
@ -647,9 +645,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ mov(eax, Operand(ebp, kIndexOffset)); __ mov(eax, Operand(ebp, kIndexOffset));
__ jmp(&entry); __ jmp(&entry);
__ bind(&loop); __ bind(&loop);
__ mov(ecx, Operand(ebp, 2 * kPointerSize)); // load arguments __ mov(edx, Operand(ebp, 2 * kPointerSize)); // load arguments
__ push(ecx);
__ push(eax);
// Use inline caching to speed up access to arguments. // Use inline caching to speed up access to arguments.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@ -659,8 +655,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// we have generated an inline version of the keyed load. In this // we have generated an inline version of the keyed load. In this
// case, we know that we are not generating a test instruction next. // case, we know that we are not generating a test instruction next.
// Remove IC arguments from the stack and push the nth argument. // Push the nth argument.
__ add(Operand(esp), Immediate(2 * kPointerSize));
__ push(eax); __ push(eax);
// Update the index on the stack and in register eax. // Update the index on the stack and in register eax.

1644
deps/v8/src/ia32/codegen-ia32.cc

File diff suppressed because it is too large

161
deps/v8/src/ia32/codegen-ia32.h

@@ -305,19 +305,15 @@ class CodeGenerator: public AstVisitor {
   // Takes a function literal, generates code for it. This function should only
   // be called by compiler.cc.
-  static Handle<Code> MakeCode(FunctionLiteral* fun,
-                               Handle<Script> script,
-                               bool is_eval,
-                               CompilationInfo* info);
+  static Handle<Code> MakeCode(CompilationInfo* info);

   // Printing of AST, etc. as requested by flags.
-  static void MakeCodePrologue(FunctionLiteral* fun);
+  static void MakeCodePrologue(CompilationInfo* info);

   // Allocate and install the code.
-  static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
-                                       MacroAssembler* masm,
+  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
                                        Code::Flags flags,
-                                       Handle<Script> script);
+                                       CompilationInfo* info);

 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
@@ -328,7 +324,7 @@ class CodeGenerator: public AstVisitor {
   // Accessors
   MacroAssembler* masm() { return masm_; }
   VirtualFrame* frame() const { return frame_; }
-  Handle<Script> script() { return script_; }
+  inline Handle<Script> script();

   bool has_valid_frame() const { return frame_ != NULL; }
@@ -352,11 +348,11 @@ class CodeGenerator: public AstVisitor {
  private:
   // Construction/Destruction
-  CodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval);
+  explicit CodeGenerator(MacroAssembler* masm);

   // Accessors
-  Scope* scope() const { return scope_; }
-  bool is_eval() { return is_eval_; }
+  inline bool is_eval();
+  Scope* scope();

   // Generating deferred code.
   void ProcessDeferred();
@@ -388,7 +384,7 @@ class CodeGenerator: public AstVisitor {
   void VisitStatementsAndSpill(ZoneList<Statement*>* statements);

   // Main code generation function
-  void Generate(FunctionLiteral* fun, Mode mode, CompilationInfo* info);
+  void Generate(CompilationInfo* info, Mode mode);

   // Generate the return sequence code.  Should be called no more than
   // once per compiled function, immediately after binding the return
@@ -396,7 +392,7 @@ class CodeGenerator: public AstVisitor {
   void GenerateReturnSequence(Result* return_value);

   // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode() const;
+  ArgumentsAllocationMode ArgumentsMode();

   // Store the arguments object and allocate it if necessary.
   Result StoreArgumentsObject(bool initial);
@@ -433,8 +429,8 @@ class CodeGenerator: public AstVisitor {
   void LoadAndSpill(Expression* expression);

   // Read a value from a slot and leave it on top of the expression stack.
-  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
+  Result LoadFromSlot(Slot* slot, TypeofState typeof_state);
+  Result LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
   Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                            TypeofState typeof_state,
                                            JumpTarget* slow);
@@ -443,10 +439,22 @@ class CodeGenerator: public AstVisitor {
   // value in place.
   void StoreToSlot(Slot* slot, InitState init_state);

-  // Load a property of an object, returning it in a Result.
-  // The object and the property name are passed on the stack, and
-  // not changed.
-  Result EmitKeyedLoad(bool is_global);
+  // Support for compiling assignment expressions.
+  void EmitSlotAssignment(Assignment* node);
+  void EmitNamedPropertyAssignment(Assignment* node);
+  void EmitKeyedPropertyAssignment(Assignment* node);
+
+  // Receiver is passed on the frame and consumed.
+  Result EmitNamedLoad(Handle<String> name, bool is_contextual);
+
+  // Receiver and value are passed on the frame and consumed.
+  Result EmitNamedStore(Handle<String> name);
+
+  // Receiver and key are passed on the frame and consumed.
+  Result EmitKeyedLoad();
+
+  // Receiver, key, and value are passed on the frame and consumed.
+  Result EmitKeyedStore(StaticType* key_type);

   // Special code for typeof expressions: Unfortunately, we must
   // be careful when loading the expression in 'typeof'
@@ -537,7 +545,7 @@ class CodeGenerator: public AstVisitor {
   void DeclareGlobals(Handle<FixedArray> pairs);

   // Instantiate the function boilerplate.
-  void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+  Result InstantiateBoilerplate(Handle<JSFunction> boilerplate);

   // Support for type checks.
   void GenerateIsSmi(ZoneList<Expression*>* args);
@@ -584,6 +592,9 @@ class CodeGenerator: public AstVisitor {
   // Support for direct calls from JavaScript to native RegExp code.
   void GenerateRegExpExec(ZoneList<Expression*>* args);

+  // Fast support for number to string.
+  void GenerateNumberToString(ZoneList<Expression*>* args);
+
   // Simple condition analysis.
   enum ConditionAnalysis {
     ALWAYS_TRUE,
@@ -607,15 +618,14 @@ class CodeGenerator: public AstVisitor {
   bool HasValidEntryRegisters();
 #endif

-  bool is_eval_;  // Tells whether code is generated for eval.
-  Handle<Script> script_;
   ZoneList<DeferredCode*> deferred_;

   // Assembler
   MacroAssembler* masm_;  // to generate code

+  CompilationInfo* info_;
+
   // Code generation state
-  Scope* scope_;
   VirtualFrame* frame_;
   RegisterAllocator* allocator_;
   CodeGenState* state_;
@@ -663,13 +673,15 @@ class GenericBinaryOpStub: public CodeStub {
  public:
   GenericBinaryOpStub(Token::Value op,
                       OverwriteMode mode,
-                      GenericBinaryFlags flags)
+                      GenericBinaryFlags flags,
+                      NumberInfo::Type operands_type = NumberInfo::kUnknown)
       : op_(op),
         mode_(mode),
         flags_(flags),
         args_in_registers_(false),
         args_reversed_(false),
-        name_(NULL) {
+        name_(NULL),
+        operands_type_(operands_type) {
     use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
@@ -694,28 +706,32 @@ class GenericBinaryOpStub: public CodeStub {
   bool args_reversed_;  // Left and right argument are swapped.
   bool use_sse3_;
   char* name_;
+  NumberInfo::Type operands_type_;  // Number type information of operands.

   const char* GetName();

 #ifdef DEBUG
   void Print() {
-    PrintF("GenericBinaryOpStub (op %s), "
-           "(mode %d, flags %d, registers %d, reversed %d)\n",
+    PrintF("GenericBinaryOpStub %d (op %s), "
+           "(mode %d, flags %d, registers %d, reversed %d, number_info %s)\n",
+           MinorKey(),
            Token::String(op_),
            static_cast<int>(mode_),
            static_cast<int>(flags_),
            static_cast<int>(args_in_registers_),
-           static_cast<int>(args_reversed_));
+           static_cast<int>(args_reversed_),
+           NumberInfo::ToString(operands_type_));
   }
 #endif

-  // Minor key encoding in 16 bits FRASOOOOOOOOOOMM.
+  // Minor key encoding in 16 bits NNNFRASOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 10> {};
-  class SSE3Bits: public BitField<bool, 12, 1> {};
-  class ArgsInRegistersBits: public BitField<bool, 13, 1> {};
-  class ArgsReversedBits: public BitField<bool, 14, 1> {};
-  class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+  class OpBits: public BitField<Token::Value, 2, 7> {};
+  class SSE3Bits: public BitField<bool, 9, 1> {};
+  class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
+  class ArgsReversedBits: public BitField<bool, 11, 1> {};
+  class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
+  class NumberInfoBits: public BitField<NumberInfo::Type, 13, 3> {};

   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
@@ -725,7 +741,8 @@ class GenericBinaryOpStub: public CodeStub {
            | FlagBits::encode(flags_)
            | SSE3Bits::encode(use_sse3_)
            | ArgsInRegistersBits::encode(args_in_registers_)
-           | ArgsReversedBits::encode(args_reversed_);
+           | ArgsReversedBits::encode(args_reversed_)
+           | NumberInfoBits::encode(operands_type_);
   }

   void Generate(MacroAssembler* masm);
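The minor key packs every stub parameter into a 16-bit integer so that each distinct configuration of the stub gets its own cached code object. A minimal, self-contained sketch of the BitField pattern used above; the field widths mirror the new NNNFRASOOOOOOOMM layout, but the concrete values in main() are made up for illustration:

#include <cassert>
#include <cstdint>

// Simplified version of V8's BitField template: a typed view of
// `size` bits starting at `shift` inside a packed integer key.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static bool is_valid(T value) {
    return (static_cast<uint32_t>(value) & ~((1u << size) - 1u)) == 0;
  }
  static uint32_t encode(T value) {
    assert(is_valid(value));
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

// Field layout mirroring the patch: 2 mode bits, 7 op bits, single bits
// for SSE3/registers/reversed/flags, then 3 number-info bits.
typedef BitField<int, 0, 2>   ModeBits;
typedef BitField<int, 2, 7>   OpBits;
typedef BitField<bool, 9, 1>  SSE3Bits;
typedef BitField<int, 13, 3>  NumberInfoBits;

int main() {
  // OR the encoded fields together, exactly as MinorKey() does above.
  uint32_t key = ModeBits::encode(1) | OpBits::encode(42) |
                 SSE3Bits::encode(true) | NumberInfoBits::encode(5);
  assert(ModeBits::decode(key) == 1);
  assert(OpBits::decode(key) == 42);
  assert(SSE3Bits::decode(key));
  assert(NumberInfoBits::decode(key) == 5);
  return 0;
}

Shrinking OpBits from 10 to 7 bits is what frees the room for the three NumberInfoBits without growing the key past 16 bits.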
@@ -750,13 +767,6 @@ class GenericBinaryOpStub: public CodeStub {
 };

-// Flag that indicates how to generate code for the stub StringAddStub.
-enum StringAddFlags {
-  NO_STRING_ADD_FLAGS = 0,
-  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
-};
-
 class StringStubBase: public CodeStub {
  public:
   // Generate code for copying characters using a simple loop. This should only
@@ -779,6 +789,38 @@ class StringStubBase: public CodeStub {
                             Register count,    // Must be ecx.
                             Register scratch,  // Neither of the above.
                             bool ascii);
+
+  // Probe the symbol table for a two character string. If the string is
+  // not found by probing a jump to the label not_found is performed. This jump
+  // does not guarantee that the string is not in the symbol table. If the
+  // string is found the code falls through with the string in register eax.
+  void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                            Register c1,
+                                            Register c2,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3,
+                                            Label* not_found);
+
+  // Generate string hash.
+  void GenerateHashInit(MacroAssembler* masm,
+                        Register hash,
+                        Register character,
+                        Register scratch);
+  void GenerateHashAddCharacter(MacroAssembler* masm,
+                                Register hash,
+                                Register character,
+                                Register scratch);
+  void GenerateHashGetHash(MacroAssembler* masm,
+                           Register hash,
+                           Register scratch);
+};
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+  NO_STRING_ADD_FLAGS = 0,
+  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
 };
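The three new hash helpers emit, instruction by instruction, the incremental string hash that the runtime computes in C++. A hedged sketch of the same computation; the shift constants are assumed to match the runtime's one-at-a-time string hasher rather than taken from this diff:

#include <cstdint>
#include <cstdio>

// Sketch of the incremental hash the three helpers above appear to
// implement in generated code (assumption: the classic one-at-a-time
// mixing steps used by V8's StringHasher).
uint32_t HashInit(uint32_t character) {
  uint32_t hash = character;        // GenerateHashInit
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
  hash += character;                // GenerateHashAddCharacter
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

uint32_t HashGetHash(uint32_t hash) {
  hash += hash << 3;                // GenerateHashGetHash: finalization
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash;
}

int main() {
  // The two-character symbol table probe needs exactly this: hash the
  // pair (c1, c2) the same way the runtime would hash the string.
  uint32_t h = HashInit('a');
  h = HashAddCharacter(h, 'b');
  printf("hash(\"ab\") = %u\n", HashGetHash(h));
  return 0;
}

Computing the hash in generated code lets GenerateTwoCharacterSymbolTableProbe check the symbol table for an existing two-character string before allocating a new one during string concatenation.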
@@ -833,6 +875,39 @@ class StringCompareStub: public StringStubBase {
 };

+class NumberToStringStub: public CodeStub {
+ public:
+  NumberToStringStub() { }
+
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the register object is found in the cache the generated code falls through
+  // with the result in the result register. The object and the result register
+  // can be the same. If the number is not found in the cache the code jumps to
+  // the label not_found with only the content of register object unchanged.
+  static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+                                              Register object,
+                                              Register result,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              bool object_is_smi,
+                                              Label* not_found);
+
+ private:
+  Major MajorKey() { return NumberToString; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("NumberToStringStub\n");
+  }
+#endif
+};
+
 } }  // namespace v8::internal

 #endif  // V8_IA32_CODEGEN_IA32_H_
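The number string cache that GenerateLookupNumberStringCache probes is a fixed-size hash table of (number, string) pairs kept in the heap, so repeated number-to-string conversions of the same value reuse one string. A toy model of the lookup, under the assumption that smi keys are hashed by masking with the (power-of-two) cache size; the sizes and layout here are illustrative, not V8's:

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// Toy model of the number string cache: entries live at an index derived
// from the number.  Real V8 keeps keys and values interleaved in a heap
// FixedArray; kSize is an assumed example value.
struct NumberStringCache {
  static const int kSize = 64;  // must be a power of two
  std::vector<std::pair<int, std::string> > entries;
  NumberStringCache() : entries(kSize, std::make_pair(-1, std::string())) {}

  // Mirrors the stub's behavior for smis: hash, compare the cached key,
  // fall through with the result on a hit, jump to not_found on a miss.
  bool Lookup(int smi, std::string* result) {
    int index = smi & (kSize - 1);
    if (entries[index].first != smi) return false;  // not_found
    *result = entries[index].second;
    return true;
  }
  void Insert(int smi, const std::string& str) {
    entries[smi & (kSize - 1)] = std::make_pair(smi, str);
  }
};

int main() {
  NumberStringCache cache;
  cache.Insert(42, "42");
  std::string s;
  printf("hit: %d, value: %s\n", static_cast<int>(cache.Lookup(42, &s)),
         s.c_str());
  return 0;
}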

3  deps/v8/src/ia32/debug-ia32.cc

@@ -125,9 +125,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
 void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   // Register state for IC load call (from ic-ia32.cc).
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, ecx.bit(), false);
+  Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), false);
 }

1  deps/v8/src/ia32/disasm-ia32.cc

@@ -1014,7 +1014,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
         int mod, regop, rm;
         get_modrm(*data, &mod, &regop, &rm);
         const char* mnem = NULL;
-        printf("%d\n", regop);
         switch (regop) {
           case 5: mnem = "subb"; break;
           case 7: mnem = "cmpb"; break;

195  deps/v8/src/ia32/fast-codegen-ia32.cc

@@ -35,79 +35,152 @@ namespace internal {

 #define __ ACCESS_MASM(masm())

-void FastCodeGenerator::EmitLoadReceiver(Register reg) {
+Register FastCodeGenerator::accumulator0() { return eax; }
+Register FastCodeGenerator::accumulator1() { return edx; }
+Register FastCodeGenerator::scratch0() { return ecx; }
+Register FastCodeGenerator::scratch1() { return edi; }
+Register FastCodeGenerator::receiver_reg() { return ebx; }
+Register FastCodeGenerator::context_reg() { return esi; }
+
+
+void FastCodeGenerator::EmitLoadReceiver() {
   // Offset 2 is due to return address and saved frame pointer.
   int index = 2 + function()->scope()->num_parameters();
-  __ mov(reg, Operand(ebp, index * kPointerSize));
+  __ mov(receiver_reg(), Operand(ebp, index * kPointerSize));
 }


-void FastCodeGenerator::EmitReceiverMapCheck() {
-  Comment cmnt(masm(), ";; MapCheck(this)");
-  if (FLAG_print_ir) {
-    PrintF("MapCheck(this)\n");
-  }
-
-  EmitLoadReceiver(edx);
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, bailout());
-
-  ASSERT(has_receiver() && receiver()->IsHeapObject());
-  Handle<HeapObject> object = Handle<HeapObject>::cast(receiver());
-  Handle<Map> map(object->map());
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Immediate(map));
-  __ j(not_equal, bailout());
-}
-
-
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<String> name) {
-  // Compile global variable accesses as load IC calls.  The only live
-  // registers are esi (context) and possibly edx (this).  Both are also
-  // saved in the stack and esi is preserved by the call.
-  __ push(CodeGenerator::GlobalObject());
-  __ mov(ecx, name);
-  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-  if (has_this_properties()) {
-    // Restore this.
-    EmitLoadReceiver(edx);
-  } else {
-    __ nop();  // Not test eax, indicates IC has no inlined code at call site.
-  }
+void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
+  ASSERT(!destination().is(no_reg));
+  ASSERT(cell->IsJSGlobalPropertyCell());
+
+  __ mov(destination(), Immediate(cell));
+  __ mov(destination(),
+         FieldOperand(destination(), JSGlobalPropertyCell::kValueOffset));
+  if (FLAG_debug_code) {
+    __ cmp(destination(), Factory::the_hole_value());
+    __ Check(not_equal, "DontDelete cells can't contain the hole");
+  }
+
+  // The loaded value is not known to be a smi.
+  clear_as_smi(destination());
 }


 void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
   LookupResult lookup;
-  receiver()->Lookup(*name, &lookup);
+  info()->receiver()->Lookup(*name, &lookup);

-  ASSERT(lookup.holder() == *receiver());
+  ASSERT(lookup.holder() == *info()->receiver());
   ASSERT(lookup.type() == FIELD);
-  Handle<Map> map(Handle<HeapObject>::cast(receiver())->map());
+  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
   int index = lookup.GetFieldIndex() - map->inobject_properties();
   int offset = index * kPointerSize;

-  // Negative offsets are inobject properties.
+  // We will emit the write barrier unless the stored value is statically
+  // known to be a smi.
+  bool needs_write_barrier = !is_smi(accumulator0());
+
+  // Perform the store.  Negative offsets are inobject properties.
   if (offset < 0) {
     offset += map->instance_size();
-    __ mov(ecx, edx);  // Copy receiver for write barrier.
+    __ mov(FieldOperand(receiver_reg(), offset), accumulator0());
+    if (needs_write_barrier) {
+      // Preserve receiver from write barrier.
+      __ mov(scratch0(), receiver_reg());
+    }
   } else {
     offset += FixedArray::kHeaderSize;
-    __ mov(ecx, FieldOperand(edx, JSObject::kPropertiesOffset));
+    __ mov(scratch0(),
+           FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
+    __ mov(FieldOperand(scratch0(), offset), accumulator0());
+  }
+
+  if (needs_write_barrier) {
+    if (destination().is(no_reg)) {
+      // After RecordWrite accumulator0 is only accidently a smi, but it is
+      // already marked as not known to be one.
+      __ RecordWrite(scratch0(), offset, accumulator0(), scratch1());
+    } else {
+      // Copy the value to the other accumulator to preserve a copy from the
+      // write barrier. One of the accumulators is available as a scratch
+      // register. Neither is a smi.
+      __ mov(accumulator1(), accumulator0());
+      clear_as_smi(accumulator1());
+      Register value_scratch = other_accumulator(destination());
+      __ RecordWrite(scratch0(), offset, value_scratch, scratch1());
+    }
+  } else if (destination().is(accumulator1())) {
+    __ mov(accumulator1(), accumulator0());
+    // Is a smi because we do not need the write barrier.
+    set_as_smi(accumulator1());
   }
-  // Perform the store.
-  __ mov(FieldOperand(ecx, offset), eax);
-  // Preserve value from write barrier in case it's needed.
-  __ mov(ebx, eax);
-  __ RecordWrite(ecx, offset, ebx, edi);
+}
+
+
+void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
+  ASSERT(!destination().is(no_reg));
+  LookupResult lookup;
+  info()->receiver()->Lookup(*name, &lookup);
+
+  ASSERT(lookup.holder() == *info()->receiver());
+  ASSERT(lookup.type() == FIELD);
+  Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
+  int index = lookup.GetFieldIndex() - map->inobject_properties();
+  int offset = index * kPointerSize;
+
+  // Perform the load.  Negative offsets are inobject properties.
+  if (offset < 0) {
+    offset += map->instance_size();
+    __ mov(destination(), FieldOperand(receiver_reg(), offset));
+  } else {
+    offset += FixedArray::kHeaderSize;
+    __ mov(scratch0(),
+           FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
+    __ mov(destination(), FieldOperand(scratch0(), offset));
+  }
+
+  // The loaded value is not known to be a smi.
+  clear_as_smi(destination());
 }
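Both EmitThisPropertyStore and EmitThisPropertyLoad rely on the same offset trick: lookup.GetFieldIndex() counts all fields, so subtracting map->inobject_properties() yields a negative index for fields stored inside the object itself and a non-negative index for fields spilled to the external properties array. A small worked example of the arithmetic; the concrete sizes are illustrative, not taken from a real map:

#include <cstdio>

// Illustrative only: where does field N of an object live?  Follows the
// offset arithmetic in EmitThisPropertyLoad/Store above.
const int kPointerSize = 4;          // ia32
const int kInstanceSize = 16;        // assumed: header + in-object slots
const int kInobjectProperties = 2;   // assumed: fields inside the object
const int kFixedArrayHeaderSize = 8; // assumed properties-array header

void Locate(int field_index) {
  int index = field_index - kInobjectProperties;
  int offset = index * kPointerSize;
  if (offset < 0) {
    // In-object: counted back from the end of the instance.
    printf("field %d: in-object at offset %d\n",
           field_index, offset + kInstanceSize);
  } else {
    // Out-of-object: inside the properties FixedArray.
    printf("field %d: properties array at offset %d\n",
           field_index, offset + kFixedArrayHeaderSize);
  }
}

int main() {
  Locate(0);  // in-object
  Locate(1);  // in-object
  Locate(2);  // first field in the properties array
  return 0;
}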
+
+void FastCodeGenerator::EmitBitOr() {
+  if (is_smi(accumulator0()) && is_smi(accumulator1())) {
+    // If both operands are known to be a smi then there is no need to check
+    // the operands or result.  There is no need to perform the operation in
+    // an effect context.
+    if (!destination().is(no_reg)) {
+      // Leave the result in the destination register.  Bitwise or is
+      // commutative.
+      __ or_(destination(), Operand(other_accumulator(destination())));
+    }
+  } else if (destination().is(no_reg)) {
+    // Result is not needed but do not clobber the operands in case of
+    // bailout.
+    __ mov(scratch0(), accumulator1());
+    __ or_(scratch0(), Operand(accumulator0()));
+    __ test(scratch0(), Immediate(kSmiTagMask));
+    __ j(not_zero, bailout(), not_taken);
+  } else {
+    // Preserve the destination operand in a scratch register in case of
+    // bailout.
+    __ mov(scratch0(), destination());
+    __ or_(destination(), Operand(other_accumulator(destination())));
+    __ test(destination(), Immediate(kSmiTagMask));
+    __ j(not_zero, bailout(), not_taken);
+  }
+
+  // If we didn't bailout, the result (in fact, both inputs too) is known to
+  // be a smi.
+  set_as_smi(accumulator0());
+  set_as_smi(accumulator1());
+}
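EmitBitOr leans on V8's smi representation: a small integer is tagged by shifting it left one bit, leaving tag bit 0 clear, so the bitwise or of two smis is itself a valid smi, and a single test against kSmiTagMask detects a non-smi in either operand or the result. A minimal sketch of the invariant:

#include <cassert>
#include <cstdint>

// V8-style smi tagging on ia32: value << 1, tag bit (bit 0) == 0.
const intptr_t kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // 0x1

intptr_t SmiTag(intptr_t value) { return value << kSmiTagSize; }
intptr_t SmiUntag(intptr_t smi) { return smi >> kSmiTagSize; }
bool IsSmi(intptr_t word)       { return (word & kSmiTagMask) == 0; }

int main() {
  intptr_t a = SmiTag(6);
  intptr_t b = SmiTag(3);
  // Bitwise or commutes with the tag: no untag/retag is needed, which is
  // why EmitBitOr can operate directly on the tagged words.
  intptr_t c = a | b;
  assert(IsSmi(c));
  assert(SmiUntag(c) == (6 | 3));
  return 0;
}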
-void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
-  ASSERT(function_ == NULL);
+void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
   ASSERT(info_ == NULL);
-  function_ = fun;
-  info_ = info;
+  info_ = compilation_info;

   // Save the caller's frame pointer and set up our own.
   Comment prologue_cmnt(masm(), ";; Prologue");
@@ -118,18 +191,42 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun, CompilationInfo* info) {
   // Note that we keep a live register reference to esi (context) at this
   // point.

-  // Receiver (this) is allocated to edx if there are this properties.
-  if (has_this_properties()) EmitReceiverMapCheck();
+  // Receiver (this) is allocated to a fixed register.
+  if (info()->has_this_properties()) {
+    Comment cmnt(masm(), ";; MapCheck(this)");
+    if (FLAG_print_ir) {
+      PrintF("#: MapCheck(this)\n");
+    }
+    ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
+    Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
+    Handle<Map> map(object->map());
+    EmitLoadReceiver();
+    __ CheckMap(receiver_reg(), map, bailout(), false);
+  }
+
+  // If there is a global variable access check if the global object is the
+  // same as at lazy-compilation time.
+  if (info()->has_globals()) {
+    Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
+    if (FLAG_print_ir) {
+      PrintF("#: MapCheck(GLOBAL)\n");
+    }
+    ASSERT(info()->has_global_object());
+    Handle<Map> map(info()->global_object()->map());
+    __ mov(scratch0(), CodeGenerator::GlobalObject());
+    __ CheckMap(scratch0(), map, bailout(), true);
+  }

-  VisitStatements(fun->body());
+  VisitStatements(function()->body());

   Comment return_cmnt(masm(), ";; Return(<undefined>)");
+  if (FLAG_print_ir) {
+    PrintF("#: Return(<undefined>)\n");
+  }
   __ mov(eax, Factory::undefined_value());
+
+  Comment epilogue_cmnt(masm(), ";; Epilogue");
   __ mov(esp, ebp);
   __ pop(ebp);
-  __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+  __ ret((scope()->num_parameters() + 1) * kPointerSize);

   __ bind(&bailout_);
 }
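The MapCheck(this) and MapCheck(GLOBAL) guards make the whole fast-path function speculative: the code is specialized for the receiver and global-object maps observed at compile time, and any mismatch jumps to the shared bailout label, after which the generic code takes over. A sketch of that control shape; every name here is illustrative, not V8 API:

#include <cstdio>

// Shape of the speculative fast path Generate() emits: verify the
// assumptions the specialized code was compiled under, and bail out to
// the generic path on any mismatch.
struct Object { int map_id; };

int GenericPath(Object* receiver) {
  printf("bailout: running generic code\n");
  return 0;
}

int FastPath(Object* receiver, int compiled_for_map) {
  if (receiver->map_id != compiled_for_map) {
    return GenericPath(receiver);  // corresponds to jumping to bailout()
  }
  // Specialized body: safe to use fixed field offsets for this map.
  printf("fast path: map check passed\n");
  return 1;
}

int main() {
  Object o = { 7 };
  FastPath(&o, 7);  // hits the fast path
  FastPath(&o, 8);  // map mismatch -> bailout
  return 0;
}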

194  deps/v8/src/ia32/full-codegen-ia32.cc

@@ -51,9 +51,10 @@ namespace internal {
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-ia32.h for its layout.
-void FullCodeGenerator::Generate(FunctionLiteral* fun, Mode mode) {
-  function_ = fun;
-  SetFunctionPosition(fun);
+void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+  ASSERT(info_ == NULL);
+  info_ = info;
+  SetFunctionPosition(function());

   if (mode == PRIMARY) {
     __ push(ebp);  // Caller's frame pointer.
@@ -62,7 +63,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
     __ push(edi);  // Callee's JS Function.

     { Comment cmnt(masm_, "[ Allocate locals");
-      int locals_count = fun->scope()->num_stack_slots();
+      int locals_count = scope()->num_stack_slots();
       if (locals_count == 1) {
         __ push(Immediate(Factory::undefined_value()));
       } else if (locals_count > 1) {
@@ -76,7 +77,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
     bool function_in_register = true;

     // Possibly allocate a local context.
-    if (fun->scope()->num_heap_slots() > 0) {
+    if (scope()->num_heap_slots() > 0) {
       Comment cmnt(masm_, "[ Allocate local context");
       // Argument to NewContext is the function, which is still in edi.
       __ push(edi);
@@ -87,9 +88,9 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
       __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);

       // Copy parameters into context if necessary.
-      int num_parameters = fun->scope()->num_parameters();
+      int num_parameters = scope()->num_parameters();
       for (int i = 0; i < num_parameters; i++) {
-        Slot* slot = fun->scope()->parameter(i)->slot();
+        Slot* slot = scope()->parameter(i)->slot();
         if (slot != NULL && slot->type() == Slot::CONTEXT) {
           int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                                      (num_parameters - 1 - i) * kPointerSize;
@@ -107,7 +108,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
       }
     }

-    Variable* arguments = fun->scope()->arguments()->AsVariable();
+    Variable* arguments = scope()->arguments()->AsVariable();
     if (arguments != NULL) {
       // Function uses arguments object.
       Comment cmnt(masm_, "[ Allocate arguments object");
@@ -117,10 +118,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
         __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
       }
       // Receiver is just before the parameters on the caller's stack.
-      __ lea(edx, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
-                                   fun->num_parameters() * kPointerSize));
+      int offset = scope()->num_parameters() * kPointerSize;
+      __ lea(edx,
+             Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
       __ push(edx);
-      __ push(Immediate(Smi::FromInt(fun->num_parameters())));
+      __ push(Immediate(Smi::FromInt(scope()->num_parameters())));
       // Arguments to ArgumentsAccessStub:
       //   function, receiver address, parameter count.
       // The stub will rewrite receiver and parameter count if the previous
@@ -130,13 +132,13 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
       __ mov(ecx, eax);  // Duplicate result.
       Move(arguments->slot(), eax, ebx, edx);
       Slot* dot_arguments_slot =
-          fun->scope()->arguments_shadow()->AsVariable()->slot();
+          scope()->arguments_shadow()->AsVariable()->slot();
       Move(dot_arguments_slot, ecx, ebx, edx);
     }
   }

   { Comment cmnt(masm_, "[ Declarations");
-    VisitDeclarations(fun->scope()->declarations());
+    VisitDeclarations(scope()->declarations());
   }

   { Comment cmnt(masm_, "[ Stack check");
@@ -156,14 +158,14 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {

   { Comment cmnt(masm_, "[ Body");
     ASSERT(loop_depth() == 0);
-    VisitStatements(fun->body());
+    VisitStatements(function()->body());
     ASSERT(loop_depth() == 0);
   }

   { Comment cmnt(masm_, "[ return <undefined>;");
     // Emit a 'return undefined' in case control fell off the end of the body.
     __ mov(eax, Factory::undefined_value());
-    EmitReturnSequence(function_->end_position());
+    EmitReturnSequence(function()->end_position());
   }
 }
@@ -190,7 +192,7 @@ void FullCodeGenerator::EmitReturnSequence(int position) {
     // patch with the code required by the debugger.
     __ mov(esp, ebp);
     __ pop(ebp);
-    __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+    __ ret((scope()->num_parameters() + 1) * kPointerSize);
 #ifdef ENABLE_DEBUGGER_SUPPORT
     // Check that the size of the code used for returning matches what is
     // expected by the debugger.
@@ -627,7 +629,7 @@ MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
       return Operand(ebp, SlotOffset(slot));
     case Slot::CONTEXT: {
       int context_chain_length =
-          function_->scope()->ContextChainLength(slot->var()->scope());
+          scope()->ContextChainLength(slot->var()->scope());
       __ LoadContext(scratch, context_chain_length);
       return CodeGenerator::ContextOperand(scratch, slot->index());
     }
@@ -686,7 +688,7 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
       // this specific context.

       // The variable in the decl always resides in the current context.
-      ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
+      ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
       if (FLAG_debug_code) {
         // Check if we have the correct context pointer.
         __ mov(ebx,
@@ -764,7 +766,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(esi);  // The context is the first argument.
   __ push(Immediate(pairs));
-  __ push(Immediate(Smi::FromInt(is_eval_ ? 1 : 0)));
+  __ push(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
   __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
@@ -775,7 +777,7 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {

   // Build the function boilerplate and instantiate it.
   Handle<JSFunction> boilerplate =
-      Compiler::BuildBoilerplate(expr, script_, this);
+      Compiler::BuildBoilerplate(expr, script(), this);
   if (HasStackOverflow()) return;

   ASSERT(boilerplate->IsBoilerplate());
@@ -806,7 +808,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in ecx and the global
     // object on the stack.
-    __ push(CodeGenerator::GlobalObject());
+    __ mov(eax, CodeGenerator::GlobalObject());
     __ mov(ecx, var->name());
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
@@ -815,7 +817,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
     // Remember that the assembler may choose to do peephole optimization
     // (eg, push/pop elimination).
     __ nop();
-    DropAndApply(1, context, eax);
+    Apply(context, eax);

   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
     Comment cmnt(masm_, "Lookup slot");
@@ -843,7 +845,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,

     // Load the object.
     MemOperand object_loc = EmitSlotSearch(object_slot, eax);
-    __ push(object_loc);
+    __ mov(edx, object_loc);

     // Assert that the key is a smi.
     Literal* key_literal = property->key()->AsLiteral();
@@ -851,7 +853,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
     ASSERT(key_literal->handle()->IsSmi());

     // Load the key.
-    __ push(Immediate(key_literal->handle()));
+    __ mov(eax, Immediate(key_literal->handle()));

     // Do a keyed property load.
     Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -860,7 +862,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
     // call.  It is treated specially by the LoadIC code.
     __ nop();
-    // Drop key and object left on the stack by IC.
-    DropAndApply(2, context, eax);
+    Apply(context, eax);
   }
 }
@@ -1011,6 +1013,99 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
 }


+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+  Comment cmnt(masm_, "[ Assignment");
+  ASSERT(expr->op() != Token::INIT_CONST);
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->target()->AsProperty();
+  if (prop != NULL) {
+    assign_type =
+        (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+  }
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      if (expr->is_compound()) {
+        // We need the receiver both on the stack and in the accumulator.
+        VisitForValue(prop->obj(), kAccumulator);
+        __ push(result_register());
+      } else {
+        VisitForValue(prop->obj(), kStack);
+      }
+      break;
+    case KEYED_PROPERTY:
+      if (expr->is_compound()) {
+        VisitForValue(prop->obj(), kStack);
+        VisitForValue(prop->key(), kAccumulator);
+        __ mov(edx, Operand(esp, 0));
+        __ push(eax);
+      } else {
+        VisitForValue(prop->obj(), kStack);
+        VisitForValue(prop->key(), kStack);
+      }
+      break;
+  }
+
+  // If we have a compound assignment: Get value of LHS expression and
+  // store it on top of the stack.
+  if (expr->is_compound()) {
+    Location saved_location = location_;
+    location_ = kStack;
+    switch (assign_type) {
+      case VARIABLE:
+        EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
+                         Expression::kValue);
+        break;
+      case NAMED_PROPERTY:
+        EmitNamedPropertyLoad(prop);
+        __ push(result_register());
+        break;
+      case KEYED_PROPERTY:
+        EmitKeyedPropertyLoad(prop);
+        __ push(result_register());
+        break;
+    }
+    location_ = saved_location;
+  }
+
+  // Evaluate RHS expression.
+  Expression* rhs = expr->value();
+  VisitForValue(rhs, kAccumulator);
+
+  // If we have a compound assignment: Apply operator.
+  if (expr->is_compound()) {
+    Location saved_location = location_;
+    location_ = kAccumulator;
+    EmitBinaryOp(expr->binary_op(), Expression::kValue);
+    location_ = saved_location;
+  }
+
+  // Record source position before possible IC call.
+  SetSourcePosition(expr->position());
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE:
+      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             context_);
+      break;
+    case NAMED_PROPERTY:
+      EmitNamedPropertyAssignment(expr);
+      break;
+    case KEYED_PROPERTY:
+      EmitKeyedPropertyAssignment(expr);
+      break;
+  }
+}
+
+
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
@@ -1181,18 +1276,16 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
   Comment cmnt(masm_, "[ Property");
   Expression* key = expr->key();

-  // Evaluate the receiver.
-  VisitForValue(expr->obj(), kStack);
-
   if (key->IsPropertyName()) {
+    VisitForValue(expr->obj(), kAccumulator);
     EmitNamedPropertyLoad(expr);
-    // Drop receiver left on the stack by IC.
-    DropAndApply(1, context_, eax);
+    Apply(context_, eax);
   } else {
-    VisitForValue(expr->key(), kStack);
+    VisitForValue(expr->obj(), kStack);
+    VisitForValue(expr->key(), kAccumulator);
+    __ pop(edx);
     EmitKeyedPropertyLoad(expr);
-    // Drop key and receiver left on the stack by IC.
-    DropAndApply(2, context_, eax);
+    Apply(context_, eax);
   }
 }
@@ -1263,25 +1356,31 @@ void FullCodeGenerator::VisitCall(Call* expr) {
       // Call to a keyed property, use keyed load IC followed by function
       // call.
       VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kStack);
+      VisitForValue(prop->key(), kAccumulator);
       // Record source code position for IC call.
       SetSourcePosition(prop->position());
+      if (prop->is_synthetic()) {
+        __ pop(edx);  // We do not need to keep the receiver.
+      } else {
+        __ mov(edx, Operand(esp, 0));  // Keep receiver, to call function on.
+      }
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
       __ call(ic, RelocInfo::CODE_TARGET);
       // By emitting a nop we make sure that we do not have a "test eax,..."
       // instruction after the call it is treated specially by the LoadIC code.
       __ nop();
-      // Drop key left on the stack by IC.
-      __ Drop(1);
-      // Pop receiver.
-      __ pop(ebx);
-      // Push result (function).
-      __ push(eax);
-      // Push receiver object on stack.
       if (prop->is_synthetic()) {
+        // Push result (function).
+        __ push(eax);
+        // Push Global receiver.
         __ mov(ecx, CodeGenerator::GlobalObject());
         __ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
       } else {
+        // Pop receiver.
+        __ pop(ebx);
+        // Push result (function).
+        __ push(eax);
         __ push(ebx);
       }
       EmitCallWithStub(expr);
@@ -1453,13 +1552,13 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
                  !proxy->var()->is_this() &&
                  proxy->var()->is_global()) {
         Comment cmnt(masm_, "Global variable");
-        __ push(CodeGenerator::GlobalObject());
+        __ mov(eax, CodeGenerator::GlobalObject());
         __ mov(ecx, Immediate(proxy->name()));
         Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
         // Use a regular load, not a contextual load, to avoid a reference
         // error.
         __ call(ic, RelocInfo::CODE_TARGET);
-        __ mov(Operand(esp, 0), eax);
+        __ push(eax);
       } else if (proxy != NULL &&
                  proxy->var()->slot() != NULL &&
                  proxy->var()->slot()->type() == Slot::LOOKUP) {
@@ -1563,11 +1662,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     if (expr->is_postfix() && context_ != Expression::kEffect) {
       __ push(Immediate(Smi::FromInt(0)));
     }
-    VisitForValue(prop->obj(), kStack);
     if (assign_type == NAMED_PROPERTY) {
+      // Put the object both on the stack and in the accumulator.
+      VisitForValue(prop->obj(), kAccumulator);
+      __ push(eax);
       EmitNamedPropertyLoad(prop);
     } else {
-      VisitForValue(prop->key(), kStack);
+      VisitForValue(prop->obj(), kStack);
+      VisitForValue(prop->key(), kAccumulator);
+      __ mov(edx, Operand(esp, 0));
+      __ push(eax);
       EmitKeyedPropertyLoad(prop);
     }
   }
462
deps/v8/src/ia32/ic-ia32.cc

@ -50,28 +50,29 @@ namespace internal {
// or if name is not a symbol, and will jump to the miss_label in that case. // or if name is not a symbol, and will jump to the miss_label in that case.
static void GenerateDictionaryLoad(MacroAssembler* masm, static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss_label, Label* miss_label,
Register receiver,
Register name,
Register r0, Register r0,
Register r1, Register r1,
Register r2, Register r2,
Register name,
DictionaryCheck check_dictionary) { DictionaryCheck check_dictionary) {
// Register use: // Register use:
// //
// name - holds the name of the property and is unchanged.
// receiver - holds the receiver and is unchanged.
// Scratch registers:
// r0 - used to hold the property dictionary. // r0 - used to hold the property dictionary.
// //
// r1 - initially the receiver // r1 - used for the index into the property dictionary
// - used for the index into the property dictionary
// - holds the result on exit. // - holds the result on exit.
// //
// r2 - used to hold the capacity of the property dictionary. // r2 - used to hold the capacity of the property dictionary.
//
// name - holds the name of the property and is unchanged.
Label done; Label done;
// Check for the absence of an interceptor. // Check for the absence of an interceptor.
// Load the map into r0. // Load the map into r0.
__ mov(r0, FieldOperand(r1, JSObject::kMapOffset)); __ mov(r0, FieldOperand(receiver, JSObject::kMapOffset));
// Test the has_named_interceptor bit in the map. // Test the has_named_interceptor bit in the map.
__ test(FieldOperand(r0, Map::kInstanceAttributesOffset), __ test(FieldOperand(r0, Map::kInstanceAttributesOffset),
Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8)))); Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
@ -91,7 +92,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
__ j(equal, miss_label, not_taken); __ j(equal, miss_label, not_taken);
// Load properties array. // Load properties array.
__ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset)); __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary. // Check that the properties array is a dictionary.
if (check_dictionary == CHECK_DICTIONARY) { if (check_dictionary == CHECK_DICTIONARY) {
@@ -176,14 +177,12 @@ const int LoadIC::kOffsetToLoadInstruction = 13;

 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   //  -- esp[0] : return address
-  //  -- esp[4] : receiver
   // -----------------------------------
   Label miss;

-  __ mov(eax, Operand(esp, kPointerSize));
-
   StubCompiler::GenerateLoadArrayLength(masm, eax, edx, &miss);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -192,15 +191,13 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {

 void LoadIC::GenerateStringLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   //  -- esp[0] : return address
-  //  -- esp[4] : receiver
   // -----------------------------------
   Label miss;

-  __ mov(eax, Operand(esp, kPointerSize));
-
-  StubCompiler::GenerateLoadStringLength(masm, eax, edx, &miss);
+  StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
 }
@@ -208,14 +205,12 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {

 void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
   // ----------- S t a t e -------------
+  //  -- eax    : receiver
   //  -- ecx    : name
   //  -- esp[0] : return address
-  //  -- esp[4] : receiver
   // -----------------------------------
   Label miss;

-  __ mov(eax, Operand(esp, kPointerSize));
-
   StubCompiler::GenerateLoadFunctionPrototype(masm, eax, edx, ebx, &miss);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -224,26 +219,22 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {

 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
   //  -- esp[0] : return address
-  //  -- esp[4] : name
-  //  -- esp[8] : receiver
   // -----------------------------------
   Label slow, check_string, index_int, index_string;
   Label check_pixel_array, probe_dictionary;

-  // Load name and receiver.
-  __ mov(eax, Operand(esp, kPointerSize));
-  __ mov(ecx, Operand(esp, 2 * kPointerSize));
-
   // Check that the object isn't a smi.
-  __ test(ecx, Immediate(kSmiTagMask));
+  __ test(edx, Immediate(kSmiTagMask));
   __ j(zero, &slow, not_taken);

   // Get the map of the receiver.
-  __ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));

   // Check bit field.
-  __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+  __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
   __ test(ebx, Immediate(kSlowCaseBitFieldMask));
   __ j(not_zero, &slow, not_taken);
   // Check that the object is some kind of JS object EXCEPT JS Value type.
@@ -251,56 +242,58 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // we enter the runtime system to make sure that indexing
   // into string objects work as intended.
   ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
-  __ cmp(edx, JS_OBJECT_TYPE);
-  __ j(less, &slow, not_taken);
+  __ CmpInstanceType(ecx, JS_OBJECT_TYPE);
+  __ j(below, &slow, not_taken);

   // Check that the key is a smi.
   __ test(eax, Immediate(kSmiTagMask));
   __ j(not_zero, &check_string, not_taken);
-  __ sar(eax, kSmiTagSize);
+  __ mov(ebx, eax);
+  __ SmiUntag(ebx);

   // Get the elements array of the object.
   __ bind(&index_int);
-  __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
-  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
-         Immediate(Factory::fixed_array_map()));
-  __ j(not_equal, &check_pixel_array);
+  __ CheckMap(ecx, Factory::fixed_array_map(), &check_pixel_array, true);
   // Check that the key (index) is within bounds.
-  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
+  __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
   __ j(above_equal, &slow);
   // Fast case: Do the load.
-  __ mov(eax,
-         Operand(ecx, eax, times_4, FixedArray::kHeaderSize - kHeapObjectTag));
-  __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
+  __ mov(ecx, FieldOperand(ecx, ebx, times_4, FixedArray::kHeaderSize));
+  __ cmp(Operand(ecx), Immediate(Factory::the_hole_value()));
   // In case the loaded value is the_hole we have to consult GetProperty
   // to ensure the prototype chain is searched.
   __ j(equal, &slow);
+  __ mov(eax, ecx);
   __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
   __ ret(0);

-  // Check whether the elements is a pixel array.
-  // eax: untagged index
-  // ecx: elements array
   __ bind(&check_pixel_array);
-  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
-         Immediate(Factory::pixel_array_map()));
-  __ j(not_equal, &slow);
-  __ cmp(eax, FieldOperand(ecx, PixelArray::kLengthOffset));
+  // Check whether the elements is a pixel array.
+  // edx: receiver
+  // ebx: untagged index
+  // eax: key
+  // ecx: elements
+  __ CheckMap(ecx, Factory::pixel_array_map(), &slow, true);
+  __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
   __ j(above_equal, &slow);
-  __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
-  __ movzx_b(eax, Operand(ecx, eax, times_1, 0));
-  __ shl(eax, kSmiTagSize);
+  __ mov(eax, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
+  __ movzx_b(eax, Operand(eax, ebx, times_1, 0));
+  __ SmiTag(eax);
   __ ret(0);

-  // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
+  // Slow case: jump to runtime.
+  // edx: receiver
+  // eax: key
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
-  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+  GenerateRuntimeGetProperty(masm);

   __ bind(&check_string);
   // The key is not a smi.
   // Is it a string?
-  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+  // edx: receiver
+  // eax: key
+  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
   __ j(above_equal, &slow);
   // Is the string an array index, with cached numeric value?
   __ mov(ebx, FieldOperand(eax, String::kHashFieldOffset));
@@ -308,55 +301,58 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ j(not_zero, &index_string, not_taken);

   // Is the string a symbol?
-  __ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
+  __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceTypeOffset));
   ASSERT(kSymbolTag != 0);
   __ test(ebx, Immediate(kIsSymbolMask));
   __ j(zero, &slow, not_taken);

   // If the receiver is a fast-case object, check the keyed lookup
-  // cache. Otherwise probe the dictionary leaving result in ecx.
-  __ mov(ebx, FieldOperand(ecx, JSObject::kPropertiesOffset));
+  // cache. Otherwise probe the dictionary.
+  __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
   __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
          Immediate(Factory::hash_table_map()));
   __ j(equal, &probe_dictionary);

   // Load the map of the receiver, compute the keyed lookup cache hash
   // based on 32 bits of the map pointer and the string hash.
-  __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ mov(edx, ebx);
-  __ shr(edx, KeyedLookupCache::kMapHashShift);
-  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
-  __ shr(eax, String::kHashShift);
-  __ xor_(edx, Operand(eax));
-  __ and_(edx, KeyedLookupCache::kCapacityMask);
+  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ mov(ecx, ebx);
+  __ shr(ecx, KeyedLookupCache::kMapHashShift);
+  __ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
+  __ shr(edi, String::kHashShift);
+  __ xor_(ecx, Operand(edi));
+  __ and_(ecx, KeyedLookupCache::kCapacityMask);

   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
   ExternalReference cache_keys
       = ExternalReference::keyed_lookup_cache_keys();
-  __ mov(edi, edx);
+  __ mov(edi, ecx);
   __ shl(edi, kPointerSizeLog2 + 1);
   __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
   __ add(Operand(edi), Immediate(kPointerSize));
-  __ mov(edi, Operand::StaticArray(edi, times_1, cache_keys));
-  __ cmp(edi, Operand(esp, kPointerSize));
+  __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);

   // Get field offset and check that it is an in-object property.
+  // edx     : receiver
+  // ebx     : receiver's map
+  // eax     : key
+  // ecx     : lookup cache index
   ExternalReference cache_field_offsets
       = ExternalReference::keyed_lookup_cache_field_offsets();
-  __ mov(eax,
-         Operand::StaticArray(edx, times_pointer_size, cache_field_offsets));
-  __ movzx_b(edx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
-  __ cmp(eax, Operand(edx));
+  __ mov(edi,
+         Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
+  __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+  __ cmp(edi, Operand(ecx));
   __ j(above_equal, &slow);

   // Load in-object property.
-  __ sub(eax, Operand(edx));
-  __ movzx_b(edx, FieldOperand(ebx, Map::kInstanceSizeOffset));
-  __ add(eax, Operand(edx));
-  __ mov(eax, FieldOperand(ecx, eax, times_pointer_size, 0));
+  __ sub(edi, Operand(ecx));
+  __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
+  __ add(ecx, Operand(edi));
+  __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
   __ ret(0);

   // Do a quick inline probe of the receiver's dictionary, if it
@@ -364,13 +360,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ bind(&probe_dictionary);
   GenerateDictionaryLoad(masm,
                          &slow,
-                         ebx,
-                         ecx,
                          edx,
                          eax,
+                         ebx,
+                         ecx,
+                         edi,
                          DICTIONARY_CHECK_DONE);
-  GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
-  __ mov(eax, Operand(ecx));
+  GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, ebx);
+  __ mov(eax, ecx);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);
@@ -381,51 +378,47 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
          (1 << String::kArrayIndexValueBits));
   __ bind(&index_string);
-  __ mov(eax, Operand(ebx));
-  __ and_(eax, String::kArrayIndexHashMask);
-  __ shr(eax, String::kHashShift);
+  __ and_(ebx, String::kArrayIndexHashMask);
+  __ shr(ebx, String::kHashShift);
   __ jmp(&index_int);
 }
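The fast path above hashes the receiver's map pointer together with the symbol's cached hash field to index a small global cache of (map, symbol) → field-offset entries, so repeated keyed loads of the same property skip the dictionary entirely. A sketch of the index computation; the shift and capacity constants are assumptions standing in for the KeyedLookupCache and String constants, only the shape of the computation is taken from the code:

#include <cstdint>
#include <cstdio>

// Sketch of the KeyedLookupCache index computation emitted above.
// Constants are stand-ins for KeyedLookupCache::kMapHashShift,
// String::kHashShift, and KeyedLookupCache::kCapacityMask.
const int kMapHashShift = 2;     // low map-pointer bits are alignment
const int kStringHashShift = 2;  // skip hash-field flag bits
const uint32_t kCapacityMask = 64 - 1;

int CacheIndex(uintptr_t map_pointer, uint32_t hash_field) {
  uint32_t map_hash = static_cast<uint32_t>(map_pointer) >> kMapHashShift;
  uint32_t name_hash = hash_field >> kStringHashShift;
  return static_cast<int>((map_hash ^ name_hash) & kCapacityMask);
}

int main() {
  // The same (map, symbol) pair always lands in the same slot, so a
  // probe is one compare of the cached map plus one compare of the key.
  printf("index = %d\n", CacheIndex(0x2a3f40u, 0x1234u));
  return 0;
}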
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // ----------- S t a t e -------------
+ // -- eax    : key
+ // -- edx    : receiver
  // -- esp[0] : return address
- // -- esp[4] : key
- // -- esp[8] : receiver
  // -----------------------------------
  Label miss, index_ok;

  // Pop return address.
  // Performing the load early is better in the common case.
- __ pop(eax);
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
- __ test(ebx, Immediate(kSmiTagMask));
+ __ pop(ebx);
+ __ test(edx, Immediate(kSmiTagMask));
  __ j(zero, &miss);
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
  __ test(ecx, Immediate(kIsNotStringMask));
  __ j(not_zero, &miss);

  // Check if key is a smi or a heap number.
- __ mov(edx, Operand(esp, 0));
- __ test(edx, Immediate(kSmiTagMask));
+ __ test(eax, Immediate(kSmiTagMask));
  __ j(zero, &index_ok);
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(ecx, Factory::heap_number_map());
  __ j(not_equal, &miss);

  __ bind(&index_ok);
- // Duplicate receiver and key since they are expected on the stack after
- // the KeyedLoadIC call.
- __ push(ebx);  // receiver
- __ push(edx);  // key
- __ push(eax);  // return address
+ // Push receiver and key on the stack, and make a tail call.
+ __ push(edx);  // receiver
+ __ push(eax);  // key
+ __ push(ebx);  // return address
  __ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_FUNCTION);

  __ bind(&miss);
- __ push(eax);
+ __ push(ebx);
  GenerateMiss(masm);
}
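Reduced to a predicate, the stub's guards before tail-calling STRING_CHAR_AT look like this; the helper names are illustrative stand-ins for checks the assembly performs inline, not real V8 API:

    // Illustrative helpers standing in for the stub's inline checks.
    bool IsSmi(const Object* o);
    bool IsString(const Object* o);
    bool HasHeapNumberMap(const Object* o);

    // Sketch: fast-path condition for KeyedLoadIC::GenerateString.
    bool TakesStringCharAtFastPath(const Object* receiver, const Object* key) {
      if (IsSmi(receiver)) return false;      // test(edx, kSmiTagMask)
      if (!IsString(receiver)) return false;  // kIsNotStringMask on instance type
      if (IsSmi(key)) return true;            // smi keys go straight to index_ok
      return HasHeapNumberMap(key);           // cmp(map, heap_number_map)
    }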
@@ -433,18 +426,14 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
                                        ExternalArrayType array_type) {
  // ----------- S t a t e -------------
+ // -- eax    : key
+ // -- edx    : receiver
  // -- esp[0] : return address
- // -- esp[4] : key
- // -- esp[8] : receiver
  // -----------------------------------
  Label slow, failed_allocation;

- // Load name and receiver.
- __ mov(eax, Operand(esp, kPointerSize));
- __ mov(ecx, Operand(esp, 2 * kPointerSize));
-
  // Check that the object isn't a smi.
- __ test(ecx, Immediate(kSmiTagMask));
+ __ test(edx, Immediate(kSmiTagMask));
  __ j(zero, &slow, not_taken);

  // Check that the key is a smi.
@@ -452,59 +441,56 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
  __ j(not_zero, &slow, not_taken);

  // Get the map of the receiver.
- __ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks. We need
  // to check this explicitly since this generic stub does not perform
  // map checks.
- __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
  __ j(not_zero, &slow, not_taken);

- // Get the instance type from the map of the receiver.
- __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
- // Check that the object is a JS object.
- __ cmp(edx, JS_OBJECT_TYPE);
+ __ CmpInstanceType(ecx, JS_OBJECT_TYPE);
  __ j(not_equal, &slow, not_taken);

  // Check that the elements array is the appropriate type of
  // ExternalArray.
- // eax: index (as a smi)
- // ecx: JSObject
- __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
  Handle<Map> map(Heap::MapForExternalArrayType(array_type));
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
         Immediate(map));
  __ j(not_equal, &slow, not_taken);

+ // eax: key, known to be a smi.
+ // edx: receiver, known to be a JSObject.
+ // ebx: elements object, known to be an external array.
  // Check that the index is in range.
- __ sar(eax, kSmiTagSize);  // Untag the index.
- __ cmp(eax, FieldOperand(ecx, ExternalArray::kLengthOffset));
+ __ mov(ecx, eax);
+ __ SmiUntag(ecx);  // Untag the index.
+ __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
  // Unsigned comparison catches both negative and too-large values.
  __ j(above_equal, &slow);

- // eax: untagged index
- // ecx: elements array
- __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
- // ecx: base pointer of external storage
+ __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
+ // ebx: base pointer of external storage
  switch (array_type) {
    case kExternalByteArray:
-     __ movsx_b(eax, Operand(ecx, eax, times_1, 0));
+     __ movsx_b(ecx, Operand(ebx, ecx, times_1, 0));
      break;
    case kExternalUnsignedByteArray:
-     __ movzx_b(eax, Operand(ecx, eax, times_1, 0));
+     __ movzx_b(ecx, Operand(ebx, ecx, times_1, 0));
      break;
    case kExternalShortArray:
-     __ movsx_w(eax, Operand(ecx, eax, times_2, 0));
+     __ movsx_w(ecx, Operand(ebx, ecx, times_2, 0));
      break;
    case kExternalUnsignedShortArray:
-     __ movzx_w(eax, Operand(ecx, eax, times_2, 0));
+     __ movzx_w(ecx, Operand(ebx, ecx, times_2, 0));
      break;
    case kExternalIntArray:
    case kExternalUnsignedIntArray:
-     __ mov(eax, Operand(ecx, eax, times_4, 0));
+     __ mov(ecx, Operand(ebx, ecx, times_4, 0));
      break;
    case kExternalFloatArray:
-     __ fld_s(Operand(ecx, eax, times_4, 0));
+     __ fld_s(Operand(ebx, ecx, times_4, 0));
      break;
    default:
      UNREACHABLE();
@@ -512,7 +498,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
  }

  // For integer array types:
- // eax: value
+ // ecx: value
  // For floating-point array type:
  // FP(0): value
@@ -523,21 +509,19 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
  // it to a HeapNumber.
  Label box_int;
  if (array_type == kExternalIntArray) {
-   // See Smi::IsValid for why this works.
-   __ mov(ebx, eax);
-   __ add(Operand(ebx), Immediate(0x40000000));
-   __ cmp(ebx, 0x80000000);
-   __ j(above_equal, &box_int);
+   __ cmp(ecx, 0xC0000000);
+   __ j(sign, &box_int);
  } else {
    ASSERT_EQ(array_type, kExternalUnsignedIntArray);
    // The test is different for unsigned int values. Since we need
-   // the Smi-encoded result to be treated as unsigned, we can't
+   // the value to be in the range of a positive smi, we can't
    // handle either of the top two bits being set in the value.
-   __ test(eax, Immediate(0xC0000000));
+   __ test(ecx, Immediate(0xC0000000));
    __ j(not_zero, &box_int);
  }

- __ shl(eax, kSmiTagSize);
+ __ mov(eax, ecx);
+ __ SmiTag(eax);
  __ ret(0);

  __ bind(&box_int);
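Both branches above guard the same invariant: the loaded 32-bit value must fit in a 31-bit smi before SmiTag can be applied. A hedged C++ restatement of the two range tests:

    #include <stdint.h>

    // Signed case: v fits in a smi iff v is in [-2^30, 2^30).
    // cmp(ecx, 0xC0000000) computes ecx + 0x40000000 (mod 2^32), whose sign
    // bit is set exactly when v lies outside that range.
    bool SignedFitsInSmi(int32_t v) {
      return (static_cast<uint32_t>(v) + 0x40000000u) < 0x80000000u;
    }

    // Unsigned case: the result must be a non-negative smi, so neither of
    // the top two bits may be set, i.e. v < 2^30.
    bool UnsignedFitsInSmi(uint32_t v) {
      return (v & 0xC0000000u) == 0;
    }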
@@ -545,34 +529,37 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
    // Allocate a HeapNumber for the int and perform int-to-double
    // conversion.
    if (array_type == kExternalIntArray) {
-     __ push(eax);
+     __ push(ecx);
      __ fild_s(Operand(esp, 0));
-     __ pop(eax);
+     __ pop(ecx);
    } else {
      ASSERT(array_type == kExternalUnsignedIntArray);
      // Need to zero-extend the value.
      // There's no fild variant for unsigned values, so zero-extend
      // to a 64-bit int manually.
      __ push(Immediate(0));
-     __ push(eax);
+     __ push(ecx);
      __ fild_d(Operand(esp, 0));
-     __ pop(eax);
-     __ pop(eax);
+     __ pop(ecx);
+     __ pop(ecx);
    }
    // FP(0): value
-   __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
+   __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
    // Set the value.
+   __ mov(eax, ecx);
    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
    __ ret(0);
  } else if (array_type == kExternalFloatArray) {
    // For the floating-point array type, we need to always allocate a
    // HeapNumber.
-   __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
+   __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
    // Set the value.
+   __ mov(eax, ecx);
    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
    __ ret(0);
  } else {
-   __ shl(eax, kSmiTagSize);
+   __ mov(eax, ecx);
+   __ SmiTag(eax);
    __ ret(0);
  }
@@ -583,10 +570,51 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
  __ fincstp();
  // Fall through to slow case.

- // Slow case: Load name and receiver from stack and jump to runtime.
+ // Slow case: Load key and receiver from stack and jump to runtime.
  __ bind(&slow);
  __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
- Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+ GenerateRuntimeGetProperty(masm);
}
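Seen end to end, the stub is a bounds-checked, type-dispatched element read. The C++ below is a simplified model of the same control flow, with a local enum standing in for V8's ExternalArrayType and the slow path reduced to a stub function:

    #include <stdint.h>

    enum ArrayTypeSketch { kByte, kUnsignedByte, kShort, kUnsignedShort,
                           kInt, kUnsignedInt, kFloat };

    double SlowPathLoad();  // stands in for the runtime call at &slow

    double LoadExternalElement(const void* base, uint32_t index,
                               uint32_t length, ArrayTypeSketch type) {
      // Unsigned comparison catches both negative and too-large indices.
      if (index >= length) return SlowPathLoad();
      switch (type) {
        case kByte:          return static_cast<const int8_t*>(base)[index];
        case kUnsignedByte:  return static_cast<const uint8_t*>(base)[index];
        case kShort:         return static_cast<const int16_t*>(base)[index];
        case kUnsignedShort: return static_cast<const uint16_t*>(base)[index];
        case kInt:           return static_cast<const int32_t*>(base)[index];
        case kUnsignedInt:   return static_cast<const uint32_t*>(base)[index];
        case kFloat:         return static_cast<const float*>(base)[index];
      }
      return 0;  // unreachable; mirrors UNREACHABLE() in the stub
    }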
+
+void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  // -- eax    : key
+  // -- edx    : receiver
+  // -- esp[0] : return address
+  // -----------------------------------
+  Label slow;
+
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+
+  // Check that the key is a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow, not_taken);
+
+  // Get the map of the receiver.
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+
+  // Check that it has indexed interceptor and access checks
+  // are not enabled for this object.
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
+  __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
+  __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
+  __ j(not_zero, &slow, not_taken);
+
+  // Everything is fine, call runtime.
+  __ pop(ecx);
+  __ push(edx);  // receiver
+  __ push(eax);  // key
+  __ push(ecx);  // return address
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(ExternalReference(
+      IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1);
+
+  __ bind(&slow);
+  GenerateMiss(masm);
+}
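The and/cmp pair above folds two conditions into one comparison: after masking Map::bit_field with kSlowCaseBitFieldMask, the only bit that may remain set is kHasIndexedInterceptor, so a receiver that needs access checks fails the compare. A hedged C++ predicate using the constants from the surrounding code:

    // Sketch: fast-path test on the receiver map's bit field.
    bool HasIndexedInterceptorAndNoAccessChecks(uint32_t bit_field) {
      return (bit_field & kSlowCaseBitFieldMask) ==
             (1 << Map::kHasIndexedInterceptor);
    }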
@@ -645,7 +673,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
  // Slow case: call runtime.
  __ bind(&slow);
- Generate(masm, ExternalReference(Runtime::kSetProperty));
+ GenerateRuntimeSetProperty(masm);

  // Check whether the elements object is a pixel array.
  // eax: value
@@ -918,7 +946,7 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
  // Slow case: call runtime.
  __ bind(&slow);
- Generate(masm, ExternalReference(Runtime::kSetProperty));
+ GenerateRuntimeSetProperty(masm);
}

@@ -1001,7 +1029,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
  // Search dictionary - put result in register edi.
  __ mov(edi, edx);
- GenerateDictionaryLoad(masm, miss, eax, edi, ebx, ecx, CHECK_DICTIONARY);
+ GenerateDictionaryLoad(masm, miss, edx, ecx, eax, edi, ebx, CHECK_DICTIONARY);

  // Check that the result is not a smi.
  __ test(edi, Immediate(kSmiTagMask));
@@ -1150,13 +1178,11 @@ Object* LoadIC_Miss(Arguments args);

void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
+ // -- eax    : receiver
  // -- ecx    : name
  // -- esp[0] : return address
- // -- esp[4] : receiver
  // -----------------------------------

- __ mov(eax, Operand(esp, kPointerSize));
-
  // Probe the stub cache.
  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
                                         NOT_IN_LOOP,
@@ -1164,20 +1190,18 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  StubCache::GenerateProbe(masm, flags, eax, ecx, ebx, edx);

  // Cache miss: Jump to runtime.
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ GenerateMiss(masm);
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
+ // -- eax    : receiver
  // -- ecx    : name
  // -- esp[0] : return address
- // -- esp[4] : receiver
  // -----------------------------------
  Label miss, probe, global;

- __ mov(eax, Operand(esp, kPointerSize));
-
  // Check that the receiver isn't a smi.
  __ test(eax, Immediate(kSmiTagMask));
  __ j(zero, &miss, not_taken);
@@ -1202,8 +1226,16 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
  // Search the dictionary placing the result in eax.
  __ bind(&probe);
- GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx, CHECK_DICTIONARY);
- GenerateCheckNonObjectOrLoaded(masm, &miss, eax, edx);
+ GenerateDictionaryLoad(masm,
+                        &miss,
+                        eax,
+                        ecx,
+                        edx,
+                        edi,
+                        ebx,
+                        CHECK_DICTIONARY);
+ GenerateCheckNonObjectOrLoaded(masm, &miss, edi, edx);
+ __ mov(eax, edi);
  __ ret(0);

  // Global object access: Check access rights.
@@ -1213,37 +1245,24 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {

  // Cache miss: Restore receiver from stack and jump to runtime.
  __ bind(&miss);
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ GenerateMiss(masm);
}

void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
+ // -- eax    : receiver
  // -- ecx    : name
  // -- esp[0] : return address
- // -- esp[4] : receiver
- // -----------------------------------
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
-}
-
-void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
- // ----------- S t a t e -------------
- // -- ecx    : name
- // -- esp[0] : return address
- // -- esp[4] : receiver
  // -----------------------------------
- __ mov(eax, Operand(esp, kPointerSize));

  __ pop(ebx);
  __ push(eax);  // receiver
  __ push(ecx);  // name
  __ push(ebx);  // return address

  // Perform tail call to the entry.
- __ TailCallRuntime(f, 2, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1);
}

@@ -1347,31 +1366,35 @@ Object* KeyedLoadIC_Miss(Arguments args);

void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
+ // -- eax    : key
+ // -- edx    : receiver
  // -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8] : receiver
  // -----------------------------------
- Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+
+ __ pop(ebx);
+ __ push(edx);  // receiver
+ __ push(eax);  // name
+ __ push(ebx);  // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1);
}

-void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ----------- S t a t e -------------
+ // -- eax    : key
+ // -- edx    : receiver
  // -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8] : receiver
  // -----------------------------------
- __ mov(eax, Operand(esp, kPointerSize));
- __ mov(ecx, Operand(esp, 2 * kPointerSize));

  __ pop(ebx);
- __ push(ecx);  // receiver
+ __ push(edx);  // receiver
  __ push(eax);  // name
  __ push(ebx);  // return address

  // Perform tail call to the entry.
- __ TailCallRuntime(f, 2, 1);
+ __ TailCallRuntime(ExternalReference(Runtime::kKeyedGetProperty), 2, 1);
}
@@ -1393,49 +1416,80 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
}

-void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- eax    : value
- // -- ecx    : transition map
+ // -- ecx    : name
  // -- edx    : receiver
  // -- esp[0] : return address
  // -----------------------------------

  __ pop(ebx);
- __ push(edx);  // receiver
- __ push(ecx);  // transition map
- __ push(eax);  // value
- __ push(ebx);  // return address
+ __ push(edx);
+ __ push(ecx);
+ __ push(eax);
+ __ push(ebx);

  // Perform tail call to the entry.
- __ TailCallRuntime(
-     ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
}
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
+void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- eax    : value
  // -- ecx    : name
  // -- edx    : receiver
  // -- esp[0] : return address
  // -----------------------------------
+ //
+ // This accepts as a receiver anything JSObject::SetElementsLength accepts
+ // (currently anything except for external and pixel arrays, which means
+ // anything with elements of FixedArray type), but is currently restricted
+ // to JSArray.
+ // Value must be a number, but only smis are accepted as the most common
+ // case.

- __ pop(ebx);
- __ push(edx);
- __ push(ecx);
- __ push(eax);
- __ push(ebx);
+ Label miss;

- // Perform tail call to the entry.
- __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1);
+ Register receiver = edx;
+ Register value = eax;
+ Register scratch = ebx;
+
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, &miss, not_taken);
+
+ // Check that the object is a JS array.
+ __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss, not_taken);
+
+ // Check that elements are FixedArray.
+ __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
+ __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss, not_taken);
+
+ // Check that value is a smi.
+ __ test(value, Immediate(kSmiTagMask));
+ __ j(not_zero, &miss, not_taken);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ pop(scratch);
+ __ push(receiver);
+ __ push(value);
+ __ push(scratch);  // return address
+
+ __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_ArrayLength)), 2, 1);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
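Before tail-calling into the runtime, the stub encodes a conjunction of four guards. A hedged C++ sketch of the predicate; the helper names are illustrative stand-ins, not real V8 API:

    // Illustrative helpers standing in for the stub's inline checks.
    bool IsSmi(const Object* o);
    bool IsJSArray(const Object* o);
    bool HasFixedArrayElements(const Object* o);

    // Sketch: when may the array-length store fast path fire?
    bool ArrayLengthFastPathAllowed(const Object* receiver,
                                    const Object* value) {
      if (IsSmi(receiver)) return false;       // receiver must be a heap object
      if (!IsJSArray(receiver)) return false;  // CmpObjectType(JS_ARRAY_TYPE)
      if (!HasFixedArrayElements(receiver)) return false;  // no external/pixel
      return IsSmi(value);                     // only smi lengths stay inline
    }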
// Defined in ic.cc.
Object* KeyedStoreIC_Miss(Arguments args);

-void KeyedStoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- eax    : value
  // -- esp[0] : return address
@@ -1450,28 +1504,26 @@ void KeyedStoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
  __ push(ecx);

  // Do tail-call to runtime routine.
- __ TailCallRuntime(f, 3, 1);
+ __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
}

-void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  // -- eax    : value
- // -- ecx    : transition map
  // -- esp[0] : return address
  // -- esp[4] : key
  // -- esp[8] : receiver
  // -----------------------------------

- __ pop(ebx);
+ __ pop(ecx);
  __ push(Operand(esp, 1 * kPointerSize));
  __ push(Operand(esp, 1 * kPointerSize));
- __ push(ecx);
  __ push(eax);
- __ push(ebx);
+ __ push(ecx);

  // Do tail-call to runtime routine.
- __ TailCallRuntime(
-     ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3, 1);
+ __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1);
}

#undef __

149  deps/v8/src/ia32/macro-assembler-ia32.cc

@@ -41,7 +41,6 @@ namespace internal {

MacroAssembler::MacroAssembler(void* buffer, int size)
    : Assembler(buffer, size),
-     unresolved_(0),
      generating_stub_(false),
      allow_stub_calls_(true),
      code_object_(Heap::undefined_value()) {

@@ -308,6 +307,13 @@ void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
    }
  }
}

+void MacroAssembler::DebugBreak() {
+  Set(eax, Immediate(0));
+  mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak)));
+  CEntryStub ces(1);
+  call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
#endif

void MacroAssembler::Set(Register dst, const Immediate& x) {

@@ -338,6 +344,19 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}

+void MacroAssembler::CheckMap(Register obj,
+                              Handle<Map> map,
+                              Label* fail,
+                              bool is_heap_object) {
+  if (!is_heap_object) {
+    test(obj, Immediate(kSmiTagMask));
+    j(zero, fail);
+  }
+  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
+  j(not_equal, fail);
+}
+
Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                             Register map,
                                             Register instance_type) {

@@ -364,6 +383,17 @@ void MacroAssembler::FCmp() {
}
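The CheckMap helper added above gives stubs a one-call map guard. A hedged usage sketch; the label and register choice here are illustrative:

    Label miss;
    // Receiver in edx might be a smi, so request the smi check (last arg false).
    masm->CheckMap(edx, Factory::heap_number_map(), &miss, false);
    // ... fast path for heap numbers ...
    masm->bind(&miss);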
+void MacroAssembler::AbortIfNotNumber(Register object, const char* msg) {
+  Label ok;
+  test(object, Immediate(kSmiTagMask));
+  j(zero, &ok);
+  cmp(FieldOperand(object, HeapObject::kMapOffset),
+      Factory::heap_number_map());
+  Assert(equal, msg);
+  bind(&ok);
+}
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
  push(ebp);
  mov(ebp, Operand(esp));
@@ -396,12 +426,8 @@ void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
  // Reserve room for entry stack pointer and push the debug marker.
  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0));  // saved entry sp, patched before call
- if (mode == ExitFrame::MODE_DEBUG) {
-   push(Immediate(0));
- } else {
-   push(Immediate(CodeObject()));
- }
+ push(Immediate(0));  // Saved entry sp, patched before call.
+ push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.

  // Save the frame pointer and the context in top.
  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);

@@ -538,6 +564,7 @@ void MacroAssembler::PopTryHandler() {
Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
                                   JSObject* holder, Register holder_reg,
                                   Register scratch,
+                                  int save_at_depth,
                                   Label* miss) {
  // Make sure there's no overlap between scratch and the other
  // registers.

@@ -545,7 +572,11 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
  // Keep track of the current object in register reg.
  Register reg = object_reg;
- int depth = 1;
+ int depth = 0;
+
+ if (save_at_depth == depth) {
+   mov(Operand(esp, kPointerSize), object_reg);
+ }

  // Check the maps in the prototype chain.
  // Traverse the prototype chain from the object and do map checks.

@@ -577,7 +608,6 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
      // to it in the code. Load it from the map.
      reg = holder_reg;  // from now the object is in holder_reg
      mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
    } else {
      // Check the map of the current object.
      cmp(FieldOperand(reg, HeapObject::kMapOffset),

@@ -595,6 +625,10 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
      mov(reg, Handle<JSObject>(prototype));
    }

+   if (save_at_depth == depth) {
+     mov(Operand(esp, kPointerSize), reg);
+   }
+
    // Go to the next object in the prototype chain.
    object = prototype;
  }

@@ -605,7 +639,7 @@ Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
  j(not_equal, miss, not_taken);

  // Log the check depth.
- LOG(IntEvent("check-maps-depth", depth));
+ LOG(IntEvent("check-maps-depth", depth + 1));

  // Perform security check for access to the global object and return
  // the holder register.

@@ -1122,6 +1156,16 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
}
+void MacroAssembler::CallExternalReference(ExternalReference ref,
+                                           int num_arguments) {
+  mov(eax, Immediate(num_arguments));
+  mov(ebx, Immediate(ref));
+
+  CEntryStub stub(1);
+  CallStub(&stub);
+}
+
Object* MacroAssembler::TryCallRuntime(Runtime::Function* f,
                                       int num_arguments) {
  if (f->nargs >= 0 && f->nargs != num_arguments) {

@@ -1342,10 +1386,22 @@ void MacroAssembler::InvokeFunction(Register fun,
}
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
- bool resolved;
- Handle<Code> code = ResolveBuiltin(id, &resolved);
+void MacroAssembler::InvokeFunction(JSFunction* function,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+ ASSERT(function->is_compiled());
+ // Get the function and setup the context.
+ mov(edi, Immediate(Handle<JSFunction>(function)));
+ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Invoke the cached code.
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+}
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
  // Calls are not allowed in some stubs.
  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());

@@ -1353,55 +1409,22 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
  // arguments match the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
- InvokeCode(Handle<Code>(code), expected, expected,
-            RelocInfo::CODE_TARGET, flag);
-
- const char* name = Builtins::GetName(id);
- int argc = Builtins::GetArgumentsCount(id);
-
- if (!resolved) {
-   uint32_t flags =
-       Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-       Bootstrapper::FixupFlagsUseCodeObject::encode(false);
-   Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
-   unresolved_.Add(entry);
- }
+ GetBuiltinEntry(edx, id);
+ InvokeCode(Operand(edx), expected, expected, flag);
}
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- bool resolved;
- Handle<Code> code = ResolveBuiltin(id, &resolved);
-
- const char* name = Builtins::GetName(id);
- int argc = Builtins::GetArgumentsCount(id);
-
- mov(Operand(target), Immediate(code));
- if (!resolved) {
-   uint32_t flags =
-       Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-       Bootstrapper::FixupFlagsUseCodeObject::encode(true);
-   Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
-   unresolved_.Add(entry);
- }
- add(Operand(target), Immediate(Code::kHeaderSize - kHeapObjectTag));
-}
-
-Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
-                                            bool* resolved) {
- // Move the builtin function into the temporary function slot by
- // reading it from the builtins object. NOTE: We should be able to
- // reduce this to two instructions by putting the function table in
- // the global object instead of the "builtins" object and by using a
- // real register for the function.
- mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- mov(edx, FieldOperand(edx, GlobalObject::kBuiltinsOffset));
+ // Load the JavaScript builtin function from the builtins object.
+ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ mov(edi, FieldOperand(edi, GlobalObject::kBuiltinsOffset));
  int builtins_offset =
      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
- mov(edi, FieldOperand(edx, builtins_offset));
-
- return Builtins::GetCode(id, resolved);
+ mov(edi, FieldOperand(edi, builtins_offset));
+ // Load the code entry point from the function into the target register.
+ mov(target, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ mov(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+ add(Operand(target), Immediate(Code::kHeaderSize - kHeapObjectTag));
}
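The new GetBuiltinEntry resolves a builtin with a fixed chain of heap loads at run time instead of the old resolve-and-fixup scheme. Roughly, in hedged C++ (the accessor names approximate the V8 object model rather than quote it):

    // Sketch: context -> global object -> builtins object -> JSFunction
    //         -> SharedFunctionInfo -> Code -> entry address.
    Address GetBuiltinEntrySketch(Context* context, Builtins::JavaScript id) {
      GlobalObject* global = context->global();
      JSBuiltinsObject* builtins = global->builtins();
      JSFunction* function = builtins->javascript_builtin(id);
      Code* code = function->shared()->code();
      // Code::kHeaderSize - kHeapObjectTag converts the tagged code pointer
      // into the address of the first instruction.
      return reinterpret_cast<Address>(code) +
             Code::kHeaderSize - kHeapObjectTag;
    }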
@@ -1546,6 +1569,20 @@ void MacroAssembler::Abort(const char* msg) {
}
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+    Register instance_type,
+    Register scratch,
+    Label* failure) {
+  if (!scratch.is(instance_type)) {
+    mov(scratch, instance_type);
+  }
+  and_(scratch,
+       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
+  cmp(scratch, kStringTag | kSeqStringTag | kAsciiStringTag);
+  j(not_equal, failure);
+}
+
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
                                                         Register object2,
                                                         Register scratch1,
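The mask-and-compare pair implements a three-condition test in two instructions. A hedged C++ sketch using the same flag constants:

    // Sketch: true iff the instance type denotes a string that is both
    // sequential (not cons/external) and ASCII-encoded.
    bool IsSequentialAsciiInstanceType(uint32_t instance_type) {
      const uint32_t kMask =
          kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
      return (instance_type & kMask) ==
             (kStringTag | kSeqStringTag | kAsciiStringTag);
    }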

57  deps/v8/src/ia32/macro-assembler-ia32.h

@@ -69,6 +69,7 @@ class MacroAssembler: public Assembler {
  void CopyRegistersFromStackToMemory(Register base,
                                      Register scratch,
                                      RegList regs);
+ void DebugBreak();
#endif

  // ---------------------------------------------------------------------------

@@ -123,6 +124,10 @@ class MacroAssembler: public Assembler {
                      const ParameterCount& actual,
                      InvokeFlag flag);
+ void InvokeFunction(JSFunction* function,
+                     const ParameterCount& actual,
+                     InvokeFlag flag);
+
  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
@@ -141,6 +146,14 @@ class MacroAssembler: public Assembler {
  // Compare instance type for map.
  void CmpInstanceType(Register map, InstanceType type);

+ // Check if the map of an object is equal to a specified map and
+ // branch to label if not. Skip the smi check if not required
+ // (object is known to be a heap object).
+ void CheckMap(Register obj,
+               Handle<Map> map,
+               Label* fail,
+               bool is_heap_object);
+
  // Check if the object in register heap_object is a string. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
@@ -163,6 +176,9 @@ class MacroAssembler: public Assembler {
    sar(reg, kSmiTagSize);
  }

+ // Abort execution if argument is not a number. Used in debug code.
+ void AbortIfNotNumber(Register object, const char* msg);
+
  // ---------------------------------------------------------------------------
  // Exception handling
@@ -185,9 +201,14 @@ class MacroAssembler: public Assembler {
  // clobbered if it is the same as the holder register. The function
  // returns a register containing the holder - either object_reg or
  // holder_reg.
+ // The function can optionally (when save_at_depth !=
+ // kInvalidProtoDepth) save the object at the given depth by moving
+ // it to [esp + kPointerSize].
  Register CheckMaps(JSObject* object, Register object_reg,
                     JSObject* holder, Register holder_reg,
-                    Register scratch, Label* miss);
+                    Register scratch,
+                    int save_at_depth,
+                    Label* miss);

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
@@ -339,6 +360,9 @@ class MacroAssembler: public Assembler {
  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments);

+ // Convenience function: call an external reference.
+ void CallExternalReference(ExternalReference ref, int num_arguments);
+
  // Convenience function: Same as above, but takes the fid instead.
  Object* TryCallRuntime(Runtime::FunctionId id, int num_arguments);
@@ -376,13 +400,6 @@ class MacroAssembler: public Assembler {
  void Move(Register target, Handle<Object> value);

- struct Unresolved {
-   int pc;
-   uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
-   const char* name;
- };
- List<Unresolved>* unresolved() { return &unresolved_; }
-
  Handle<Object> CodeObject() { return code_object_; }
@@ -418,6 +435,13 @@ class MacroAssembler: public Assembler {
  // ---------------------------------------------------------------------------
  // String utilities.

+ // Check whether the instance type represents a flat ascii string. Jump
+ // to the label if not. If the instance type can be scratched specify the
+ // same register for both instance type and scratch.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
+                                             Register scratch,
+                                             Label* on_not_flat_ascii_string);
+
  // Checks if both objects are sequential ASCII strings, and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialAsciiStrings(Register object1,
@@ -427,11 +451,10 @@ class MacroAssembler: public Assembler {
                                           Label* on_not_flat_ascii_strings);

 private:
- List<Unresolved> unresolved_;
  bool generating_stub_;
  bool allow_stub_calls_;
- Handle<Object> code_object_;  // This handle will be patched with the
-                               // code object on installation.
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
@@ -441,18 +464,6 @@ class MacroAssembler: public Assembler {
                      Label* done,
                      InvokeFlag flag);

- // Prepares for a call or jump to a builtin by doing two things:
- // 1. Emits code that fetches the builtin's function object from the context
- //    at runtime, and puts it in the register rdi.
- // 2. Fetches the builtin's code object, and returns it in a handle, at
- //    compile time, so that later code can emit instructions to jump or call
- //    the builtin directly. If the code object has not yet been created, it
- //    returns the builtin code object for IllegalFunction, and sets the
- //    output parameter "resolved" to false. Code that uses the return value
- //    should then add the address and the builtin name to the list of fixups
- //    called unresolved_, which is fixed up by the bootstrapper.
- Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

634  deps/v8/src/ia32/stub-cache-ia32.cc

File diff suppressed because it is too large

107  deps/v8/src/ia32/virtual-frame-ia32.cc

@@ -45,7 +45,7 @@ VirtualFrame::VirtualFrame()
    : elements_(parameter_count() + local_count() + kPreallocatedElements),
      stack_pointer_(parameter_count() + 1) {  // 0-based index of TOS.
  for (int i = 0; i <= stack_pointer_; i++) {
-   elements_.Add(FrameElement::MemoryElement());
+   elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown));
  }
  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
    register_locations_[i] = kIllegalIndex;
@@ -173,10 +173,12 @@ void VirtualFrame::MakeMergable() {
  for (int i = 0; i < element_count(); i++) {
    FrameElement element = elements_[i];
+   // All number type information is reset to unknown for a mergable frame
+   // because of incoming back edges.
    if (element.is_constant() || element.is_copy()) {
      if (element.is_synced()) {
        // Just spill.
-       elements_[i] = FrameElement::MemoryElement();
+       elements_[i] = FrameElement::MemoryElement(NumberInfo::kUnknown);
      } else {
        // Allocate to a register.
        FrameElement backing_element;  // Invalid if not a copy.
@@ -187,7 +189,8 @@ void VirtualFrame::MakeMergable() {
        ASSERT(fresh.is_valid());  // A register was spilled if all were in use.
        elements_[i] =
            FrameElement::RegisterElement(fresh.reg(),
-                                         FrameElement::NOT_SYNCED);
+                                         FrameElement::NOT_SYNCED,
+                                         NumberInfo::kUnknown);
        Use(fresh.reg(), i);

        // Emit a move.
@@ -220,6 +223,7 @@ void VirtualFrame::MakeMergable() {
      // The copy flag is not relied on before the end of this loop,
      // including when registers are spilled.
      elements_[i].clear_copied();
+     elements_[i].set_number_info(NumberInfo::kUnknown);
    }
  }
}
@@ -607,10 +611,14 @@ int VirtualFrame::InvalidateFrameSlotAt(int index) {
  // Set the new backing element.
  if (elements_[new_backing_index].is_synced()) {
    elements_[new_backing_index] =
-       FrameElement::RegisterElement(backing_reg, FrameElement::SYNCED);
+       FrameElement::RegisterElement(backing_reg,
+                                     FrameElement::SYNCED,
+                                     original.number_info());
  } else {
    elements_[new_backing_index] =
-       FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED);
+       FrameElement::RegisterElement(backing_reg,
+                                     FrameElement::NOT_SYNCED,
+                                     original.number_info());
  }
  // Update the other copies.
  for (int i = new_backing_index + 1; i < element_count(); i++) {
@@ -641,7 +649,8 @@ void VirtualFrame::TakeFrameSlotAt(int index) {
    ASSERT(fresh.is_valid());
    FrameElement new_element =
        FrameElement::RegisterElement(fresh.reg(),
-                                     FrameElement::NOT_SYNCED);
+                                     FrameElement::NOT_SYNCED,
+                                     original.number_info());
    Use(fresh.reg(), element_count());
    elements_.Add(new_element);
    __ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
@@ -853,6 +862,17 @@ Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
}

+#ifdef ENABLE_DEBUGGER_SUPPORT
+void VirtualFrame::DebugBreak() {
+  PrepareForCall(0, 0);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ DebugBreak();
+  Result result = cgen()->allocator()->Allocate(eax);
+  ASSERT(result.is_valid());
+}
+#endif
+
Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeFlag flag,
                                   int arg_count) {

@@ -877,22 +897,53 @@ Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
  // Name and receiver are on the top of the frame. The IC expects
- // name in ecx and receiver on the stack. It does not drop the
- // receiver.
+ // name in ecx and receiver in eax.
  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
  Result name = Pop();
- PrepareForCall(1, 0);  // One stack arg, not callee-dropped.
- name.ToRegister(ecx);
+ Result receiver = Pop();
+ PrepareForCall(0, 0);  // No stack arguments.
+ // Move results to the right registers:
+ if (name.is_register() && name.reg().is(eax)) {
+   if (receiver.is_register() && receiver.reg().is(ecx)) {
+     // Wrong registers.
+     __ xchg(eax, ecx);
+   } else {
+     // Register ecx is free for name, which frees eax for receiver.
+     name.ToRegister(ecx);
+     receiver.ToRegister(eax);
+   }
+ } else {
+   // Register eax is free for receiver, which frees ecx for name.
+   receiver.ToRegister(eax);
+   name.ToRegister(ecx);
+ }
  name.Unuse();
+ receiver.Unuse();
  return RawCallCodeObject(ic, mode);
}
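The shuffle covers the one hazard in a two-register calling convention: the values may already sit in each other's target registers. A hedged C++ sketch of the decision, with Reg and the emit_* helpers as illustrative stand-ins:

    // Sketch: place `name` in ecx and `receiver` in eax without clobbering.
    void PlaceForLoadIC(Reg name, Reg receiver) {
      if (name == eax && receiver == ecx) {
        emit_xchg(eax, ecx);       // fully crossed: one swap fixes both
      } else if (name == eax) {
        emit_move(ecx, name);      // free eax by moving name first
        emit_move(eax, receiver);
      } else {
        emit_move(eax, receiver);  // name is not in eax, so eax is safe
        emit_move(ecx, name);
      }
    }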
Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
- // Key and receiver are on top of the frame. The IC expects them on
- // the stack. It does not drop them.
+ // Key and receiver are on top of the frame. Put them in eax and edx.
+ Result key = Pop();
+ Result receiver = Pop();
+ PrepareForCall(0, 0);
+ if (!key.is_register() || !key.reg().is(edx)) {
+   // Register edx is available for receiver.
+   receiver.ToRegister(edx);
+   key.ToRegister(eax);
+ } else if (!receiver.is_register() || !receiver.reg().is(eax)) {
+   // Register eax is available for key.
+   key.ToRegister(eax);
+   receiver.ToRegister(edx);
+ } else {
+   __ xchg(edx, eax);
+ }
+ key.Unuse();
+ receiver.Unuse();
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- PrepareForCall(2, 0);  // Two stack args, neither callee-dropped.
  return RawCallCodeObject(ic, mode);
}
@@ -947,7 +998,6 @@ Result VirtualFrame::CallKeyedStoreIC() {
  // expects value in eax and key and receiver on the stack. It does
  // not drop the key and receiver.
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- // TODO(1222589): Make the IC grab the values from the stack.
  Result value = Pop();
  PrepareForCall(2, 0);  // Two stack args, neither callee-dropped.
  value.ToRegister(eax);
@@ -1025,6 +1075,14 @@ Result VirtualFrame::Pop() {
  int index = element_count();
  ASSERT(element.is_valid());

+ // Get number type information of the result.
+ NumberInfo::Type info;
+ if (!element.is_copy()) {
+   info = element.number_info();
+ } else {
+   info = elements_[element.index()].number_info();
+ }
+
  bool pop_needed = (stack_pointer_ == index);
  if (pop_needed) {
    stack_pointer_--;
@@ -1032,6 +1090,7 @@ Result VirtualFrame::Pop() {
    Result temp = cgen()->allocator()->Allocate();
    ASSERT(temp.is_valid());
    __ pop(temp.reg());
+   temp.set_number_info(info);
    return temp;
  }
@@ -1059,14 +1118,16 @@ Result VirtualFrame::Pop() {
    ASSERT(temp.is_valid());
    Use(temp.reg(), index);
    FrameElement new_element =
-       FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
+       FrameElement::RegisterElement(temp.reg(),
+                                     FrameElement::SYNCED,
+                                     element.number_info());
    // Preserve the copy flag on the element.
    if (element.is_copied()) new_element.set_copied();
    elements_[index] = new_element;
    __ mov(temp.reg(), Operand(ebp, fp_relative(index)));
-   return Result(temp.reg());
+   return Result(temp.reg(), info);
  } else if (element.is_register()) {
-   return Result(element.reg());
+   return Result(element.reg(), info);
  } else {
    ASSERT(element.is_constant());
    return Result(element.handle());
@@ -1090,25 +1151,25 @@ void VirtualFrame::EmitPop(Operand operand) {
}

-void VirtualFrame::EmitPush(Register reg) {
+void VirtualFrame::EmitPush(Register reg, NumberInfo::Type info) {
  ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(info));
  stack_pointer_++;
  __ push(reg);
}

-void VirtualFrame::EmitPush(Operand operand) {
+void VirtualFrame::EmitPush(Operand operand, NumberInfo::Type info) {
  ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(info));
  stack_pointer_++;
  __ push(operand);
}

-void VirtualFrame::EmitPush(Immediate immediate) {
+void VirtualFrame::EmitPush(Immediate immediate, NumberInfo::Type info) {
  ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement());
+ elements_.Add(FrameElement::MemoryElement(info));
  stack_pointer_++;
  __ push(immediate);
}

23  deps/v8/src/ia32/virtual-frame-ia32.h

@@ -28,6 +28,7 @@
#ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
#define V8_IA32_VIRTUAL_FRAME_IA32_H_

+#include "number-info.h"
#include "register-allocator.h"
#include "scopes.h"
@@ -82,7 +83,8 @@ class VirtualFrame: public ZoneObject {
  MacroAssembler* masm() { return cgen()->masm(); }

  // Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index);
+ FrameElement CopyElementAt(int index,
+                            NumberInfo::Type info = NumberInfo::kUninitialized);

  // The number of elements on the virtual frame.
  int element_count() { return elements_.length(); }
@@ -324,12 +326,16 @@ class VirtualFrame: public ZoneObject {
  Result CallRuntime(Runtime::Function* f, int arg_count);
  Result CallRuntime(Runtime::FunctionId id, int arg_count);

+#ifdef ENABLE_DEBUGGER_SUPPORT
+ void DebugBreak();
+#endif
+
  // Invoke builtin given the number of arguments it expects on (and
  // removes from) the stack.
  Result InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, int arg_count);

  // Call load IC. Name and receiver are found on top of the frame.
- // Receiver is not dropped.
+ // Both are dropped.
  Result CallLoadIC(RelocInfo::Mode mode);

  // Call keyed load IC. Key and receiver are found on top of the
@@ -381,12 +387,15 @@ class VirtualFrame: public ZoneObject {
  // Push an element on top of the expression stack and emit a
  // corresponding push instruction.
- void EmitPush(Register reg);
- void EmitPush(Operand operand);
- void EmitPush(Immediate immediate);
+ void EmitPush(Register reg,
+               NumberInfo::Type info = NumberInfo::kUnknown);
+ void EmitPush(Operand operand,
+               NumberInfo::Type info = NumberInfo::kUnknown);
+ void EmitPush(Immediate immediate,
+               NumberInfo::Type info = NumberInfo::kUnknown);

  // Push an element on the virtual frame.
- void Push(Register reg);
+ void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown);
  void Push(Handle<Object> value);
  void Push(Smi* value) {
    Push(Handle<Object>(value));
@@ -398,7 +407,7 @@ class VirtualFrame: public ZoneObject {
    // This assert will trigger if you try to push the same value twice.
    ASSERT(result->is_valid());
    if (result->is_register()) {
-     Push(result->reg());
+     Push(result->reg(), result->number_info());
    } else {
      ASSERT(result->is_constant());
      Push(result->handle());

94  deps/v8/src/ic.cc

@@ -330,10 +330,11 @@ static void LookupForRead(Object* object,
  while (true) {
    object->Lookup(name, lookup);
    // Besides normal conditions (property not found or it's not
-   // an interceptor), bail out of lookup is not cacheable: we won't
+   // an interceptor), bail out if lookup is not cacheable: we won't
    // be able to IC it anyway and regular lookup should work fine.
-   if (lookup->IsNotFound() || lookup->type() != INTERCEPTOR ||
-       !lookup->IsCacheable()) {
+   if (!lookup->IsFound()
+       || (lookup->type() != INTERCEPTOR)
+       || !lookup->IsCacheable()) {
      return;
    }
@@ -343,7 +344,7 @@ static void LookupForRead(Object* object,
    }

    holder->LocalLookupRealNamedProperty(name, lookup);
-   if (lookup->IsValid()) {
+   if (lookup->IsProperty()) {
      ASSERT(lookup->type() != INTERCEPTOR);
      return;
    }
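Restated in hedged C++ pseudocode, LookupForRead keeps walking interceptor holders until it finds either a real property or a non-cacheable lookup. Details between the two visible hunks are elided, so treat this as a structural sketch only:

    // Sketch of the loop structure; see ic.cc for the full version.
    static void LookupForReadSketch(Object* object, String* name,
                                    LookupResult* lookup) {
      while (true) {
        object->Lookup(name, lookup);
        // Bail out if the lookup is not cacheable: we won't be able to IC it
        // anyway, and regular lookup should work fine.
        if (!lookup->IsFound()
            || (lookup->type() != INTERCEPTOR)
            || !lookup->IsCacheable()) {
          return;
        }
        JSObject* holder = lookup->holder();
        holder->LocalLookupRealNamedProperty(name, lookup);
        if (lookup->IsProperty()) {
          return;  // Found a real property behind the interceptor.
        }
        // Otherwise continue with the next object in the chain; the exact
        // step is outside the visible hunks.
        object = holder;
      }
    }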
@@ -422,7 +423,7 @@ Object* CallIC::LoadFunction(State state,
  LookupResult lookup;
  LookupForRead(*object, *name, &lookup);

- if (!lookup.IsValid()) {
+ if (!lookup.IsProperty()) {
    // If the object does not have the requested property, check which
    // exception we need to throw.
    if (IsContextual(object)) {
@@ -455,7 +456,7 @@ Object* CallIC::LoadFunction(State state,
  if (result->IsJSFunction()) {
    // Check if there is an optimized (builtin) version of the function.
-   // Ignored this will degrade performance for Array.prototype.{push,pop}.
+   // Ignoring this will degrade performance for some Array functions.
    // Please note we only return the optimized function iff
    // the JSObject has FastElements.
    if (object->IsJSObject() && JSObject::cast(*object)->HasFastElements()) {
@ -493,7 +494,7 @@ void CallIC::UpdateCaches(LookupResult* lookup,
Handle<String> name) { Handle<String> name) {
ASSERT(lookup->IsLoaded()); ASSERT(lookup->IsLoaded());
// Bail out if we didn't find a result. // Bail out if we didn't find a result.
if (!lookup->IsValid() || !lookup->IsCacheable()) return; if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
// Compute the number of arguments. // Compute the number of arguments.
int argc = target()->arguments_count(); int argc = target()->arguments_count();
@ -642,8 +643,8 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
LookupResult lookup; LookupResult lookup;
LookupForRead(*object, *name, &lookup); LookupForRead(*object, *name, &lookup);
// If lookup is invalid, check if we need to throw an exception. // If we did not find a property, check if we need to throw an exception.
if (!lookup.IsValid()) { if (!lookup.IsProperty()) {
if (FLAG_strict || IsContextual(object)) { if (FLAG_strict || IsContextual(object)) {
return ReferenceError("not_defined", name); return ReferenceError("not_defined", name);
} }
@ -653,7 +654,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
bool can_be_inlined = bool can_be_inlined =
FLAG_use_ic && FLAG_use_ic &&
state == PREMONOMORPHIC && state == PREMONOMORPHIC &&
lookup.IsValid() && lookup.IsProperty() &&
lookup.IsLoaded() && lookup.IsLoaded() &&
lookup.IsCacheable() && lookup.IsCacheable() &&
lookup.holder() == *object && lookup.holder() == *object &&
@ -681,7 +682,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
} }
PropertyAttributes attr; PropertyAttributes attr;
if (lookup.IsValid() && lookup.type() == INTERCEPTOR) { if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
// Get the property. // Get the property.
Object* result = object->GetProperty(*object, &lookup, *name, &attr); Object* result = object->GetProperty(*object, &lookup, *name, &attr);
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
@ -704,7 +705,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
Handle<String> name) { Handle<String> name) {
ASSERT(lookup->IsLoaded()); ASSERT(lookup->IsLoaded());
// Bail out if we didn't find a result. // Bail out if we didn't find a result.
if (!lookup->IsValid() || !lookup->IsCacheable()) return; if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
// Loading properties from values is not common, so don't try to // Loading properties from values is not common, so don't try to
// deal with non-JS objects here. // deal with non-JS objects here.
@ -857,8 +858,8 @@ Object* KeyedLoadIC::Load(State state,
LookupResult lookup; LookupResult lookup;
LookupForRead(*object, *name, &lookup); LookupForRead(*object, *name, &lookup);
// If lookup is invalid, check if we need to throw an exception. // If we did not find a property, check if we need to throw an exception.
if (!lookup.IsValid()) { if (!lookup.IsProperty()) {
if (FLAG_strict || IsContextual(object)) { if (FLAG_strict || IsContextual(object)) {
return ReferenceError("not_defined", name); return ReferenceError("not_defined", name);
} }
@ -869,7 +870,7 @@ Object* KeyedLoadIC::Load(State state,
} }
PropertyAttributes attr; PropertyAttributes attr;
if (lookup.IsValid() && lookup.type() == INTERCEPTOR) { if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
// Get the property. // Get the property.
Object* result = object->GetProperty(*object, &lookup, *name, &attr); Object* result = object->GetProperty(*object, &lookup, *name, &attr);
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
@ -896,6 +897,8 @@ Object* KeyedLoadIC::Load(State state,
Handle<JSObject> receiver = Handle<JSObject>::cast(object); Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) { if (receiver->HasExternalArrayElements()) {
stub = external_array_stub(receiver->GetElementsKind()); stub = external_array_stub(receiver->GetElementsKind());
} else if (receiver->HasIndexedInterceptor()) {
stub = indexed_interceptor_stub();
} }
} }
set_target(stub); set_target(stub);
@ -919,7 +922,7 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
Handle<Object> object, Handle<String> name) { Handle<Object> object, Handle<String> name) {
ASSERT(lookup->IsLoaded()); ASSERT(lookup->IsLoaded());
// Bail out if we didn't find a result. // Bail out if we didn't find a result.
if (!lookup->IsValid() || !lookup->IsCacheable()) return; if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
if (!object->IsJSObject()) return; if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object); Handle<JSObject> receiver = Handle<JSObject>::cast(object);
@ -992,7 +995,7 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
static bool StoreICableLookup(LookupResult* lookup) { static bool StoreICableLookup(LookupResult* lookup) {
// Bail out if we didn't find a result. // Bail out if we didn't find a result.
if (!lookup->IsValid() || !lookup->IsCacheable()) return false; if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return false;
// If the property is read-only, we leave the IC in its current // If the property is read-only, we leave the IC in its current
// state. // state.
@ -1046,6 +1049,20 @@ Object* StoreIC::Store(State state,
return *value; return *value;
} }
// Use specialized code for setting the length of arrays.
if (receiver->IsJSArray()
&& name->Equals(Heap::length_symbol())
&& receiver->AllowsSetElementsLength()) {
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
#endif
Code* target = Builtins::builtin(Builtins::StoreIC_ArrayLength);
set_target(target);
StubCache::Set(*name, HeapObject::cast(*object)->map(), target);
return receiver->SetProperty(*name, *value, NONE);
}
// Lookup the property locally in the receiver. // Lookup the property locally in the receiver.
if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) { if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
LookupResult lookup; LookupResult lookup;
@ -1212,7 +1229,7 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
if (receiver->IsJSGlobalProxy()) return; if (receiver->IsJSGlobalProxy()) return;
// Bail out if we didn't find a result. // Bail out if we didn't find a result.
if (!lookup->IsValid() || !lookup->IsCacheable()) return; if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return;
// If the property is read-only, we leave the IC in its current // If the property is read-only, we leave the IC in its current
// state. // state.
@ -1320,16 +1337,6 @@ Object* LoadIC_Miss(Arguments args) {
} }
void LoadIC::GenerateInitialize(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
void LoadIC::GeneratePreMonomorphic(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
// Used from ic_<arch>.cc // Used from ic_<arch>.cc
Object* KeyedLoadIC_Miss(Arguments args) { Object* KeyedLoadIC_Miss(Arguments args) {
NoHandleAllocation na; NoHandleAllocation na;
@ -1340,16 +1347,6 @@ Object* KeyedLoadIC_Miss(Arguments args) {
} }
void KeyedLoadIC::GenerateInitialize(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
}
void KeyedLoadIC::GeneratePreMonomorphic(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
}
// Used from ic_<arch>.cc. // Used from ic_<arch>.cc.
Object* StoreIC_Miss(Arguments args) { Object* StoreIC_Miss(Arguments args) {
NoHandleAllocation na; NoHandleAllocation na;
@ -1361,6 +1358,17 @@ Object* StoreIC_Miss(Arguments args) {
} }
Object* StoreIC_ArrayLength(Arguments args) {
NoHandleAllocation nha;
ASSERT(args.length() == 2);
JSObject* receiver = JSObject::cast(args[0]);
Object* len = args[1];
return receiver->SetElementsLength(len);
}
// Extend storage is called in a store inline cache when // Extend storage is called in a store inline cache when
// it is necessary to extend the properties array of a // it is necessary to extend the properties array of a
// JSObject. // JSObject.
@ -1406,16 +1414,6 @@ Object* KeyedStoreIC_Miss(Arguments args) {
} }
void KeyedStoreIC::GenerateInitialize(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kKeyedStoreIC_Miss)));
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kKeyedStoreIC_Miss)));
}
static Address IC_utilities[] = { static Address IC_utilities[] = {
#define ADDR(name) FUNCTION_ADDR(name), #define ADDR(name) FUNCTION_ADDR(name),
IC_UTIL_LIST(ADDR) IC_UTIL_LIST(ADDR)
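
The IC_utilities table above is built with the X-macro idiom: IC_UTIL_LIST expands its macro argument once per utility, so the id enum (see ic.h below) and the address table are generated from a single list and cannot drift apart. A compact standalone illustration of the idiom (the list entries here are made up):

    #define UTIL_LIST(V) \
      V(LoadMiss)        \
      V(StoreMiss)

    // First expansion: an enum of ids, one per list entry.
    enum UtilId {
    #define ID(name) k##name,
      UTIL_LIST(ID)
    #undef ID
      kUtilCount
    };

    static void LoadMiss() {}
    static void StoreMiss() {}

    // Second expansion: a parallel table of function addresses.
    typedef void (*UtilFn)();
    static UtilFn util_table[kUtilCount] = {
    #define ADDR_OF(name) &name,
      UTIL_LIST(ADDR_OF)
    #undef ADDR_OF
    };

    // util_table[kLoadMiss]() now dispatches by id.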

32
deps/v8/src/ic.h

@@ -45,6 +45,7 @@ enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
   ICU(KeyedLoadIC_Miss) \
   ICU(CallIC_Miss) \
   ICU(StoreIC_Miss) \
+  ICU(StoreIC_ArrayLength) \
   ICU(SharedStoreIC_ExtendStorage) \
   ICU(KeyedStoreIC_Miss) \
   /* Utilities for IC stubs. */ \
@@ -53,6 +54,7 @@ enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
   ICU(LoadPropertyWithInterceptorOnly) \
   ICU(LoadPropertyWithInterceptorForLoad) \
   ICU(LoadPropertyWithInterceptorForCall) \
+  ICU(KeyedLoadPropertyWithInterceptor) \
   ICU(StoreInterceptorProperty)
 //
@@ -223,8 +225,10 @@ class LoadIC: public IC {
   Object* Load(State state, Handle<Object> object, Handle<String> name);
   // Code generator routines.
-  static void GenerateInitialize(MacroAssembler* masm);
-  static void GeneratePreMonomorphic(MacroAssembler* masm);
+  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+  static void GeneratePreMonomorphic(MacroAssembler* masm) {
+    GenerateMiss(masm);
+  }
   static void GenerateMiss(MacroAssembler* masm);
   static void GenerateMegamorphic(MacroAssembler* masm);
   static void GenerateNormal(MacroAssembler* masm);
@@ -240,8 +244,6 @@ class LoadIC: public IC {
   static const int kOffsetToLoadInstruction;
  private:
-  static void Generate(MacroAssembler* masm, const ExternalReference& f);
   // Update the inline cache and the global stub cache based on the
   // lookup result.
   void UpdateCaches(LookupResult* lookup,
@@ -279,8 +281,11 @@ class KeyedLoadIC: public IC {
   // Code generator routines.
   static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateInitialize(MacroAssembler* masm);
-  static void GeneratePreMonomorphic(MacroAssembler* masm);
+  static void GenerateRuntimeGetProperty(MacroAssembler* masm);
+  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+  static void GeneratePreMonomorphic(MacroAssembler* masm) {
+    GenerateMiss(masm);
+  }
   static void GenerateGeneric(MacroAssembler* masm);
   static void GenerateString(MacroAssembler* masm);
@@ -290,6 +295,7 @@ class KeyedLoadIC: public IC {
   // for all other types.
   static void GenerateExternalArray(MacroAssembler* masm,
                                     ExternalArrayType array_type);
+  static void GenerateIndexedInterceptor(MacroAssembler* masm);
   // Clear the use of the inlined version.
   static void ClearInlinedVersion(Address address);
@@ -302,8 +308,6 @@ class KeyedLoadIC: public IC {
   static const int kSlowCaseBitFieldMask =
       (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
-  static void Generate(MacroAssembler* masm, const ExternalReference& f);
   // Update the inline cache.
   void UpdateCaches(LookupResult* lookup,
                     State state,
@@ -328,6 +332,10 @@ class KeyedLoadIC: public IC {
   }
   static Code* external_array_stub(JSObject::ElementsKind elements_kind);
+  static Code* indexed_interceptor_stub() {
+    return Builtins::builtin(Builtins::KeyedLoadIC_IndexedInterceptor);
+  }
   static void Clear(Address address, Code* target);
   // Support for patching the map that is checked in an inlined
@@ -351,7 +359,7 @@ class StoreIC: public IC {
   static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
   static void GenerateMiss(MacroAssembler* masm);
   static void GenerateMegamorphic(MacroAssembler* masm);
-  static void GenerateExtendStorage(MacroAssembler* masm);
+  static void GenerateArrayLength(MacroAssembler* masm);
  private:
   // Update the inline cache and the global stub cache based on the
@@ -384,10 +392,10 @@ class KeyedStoreIC: public IC {
                 Handle<Object> value);
   // Code generators for stub routines. Only called once at startup.
-  static void GenerateInitialize(MacroAssembler* masm);
+  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
   static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateRuntimeSetProperty(MacroAssembler* masm);
   static void GenerateGeneric(MacroAssembler* masm);
-  static void GenerateExtendStorage(MacroAssembler* masm);
   // Generators for external array types. See objects.h.
   // These are similar to the generic IC; they optimize the case of
@@ -403,8 +411,6 @@ class KeyedStoreIC: public IC {
   static void RestoreInlinedVersion(Address address);
  private:
-  static void Generate(MacroAssembler* masm, const ExternalReference& f);
   // Update the inline cache.
   void UpdateCaches(LookupResult* lookup,
                     State state,
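
Worth noting in the header diff above: the trivial IC states no longer share a private Generate(masm, ExternalReference) helper (deleted from ic.cc), they now delegate to the miss generator inline in the header. The shape of that simplification as a standalone sketch (names hypothetical):

    class SketchIC {
     public:
      static void GenerateInitialize(int* masm) { GenerateMiss(masm); }
      static void GeneratePreMonomorphic(int* masm) { GenerateMiss(masm); }
      // The only generator with a real body.
      static void GenerateMiss(int* masm) {
        *masm += 1;  // Stand-in for emitting a jump to the miss stub.
      }
    };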

36
deps/v8/src/json-delay.js

@@ -80,8 +80,9 @@ var characterQuoteCache = {
 };
 function QuoteSingleJSONCharacter(c) {
-  if (c in characterQuoteCache)
+  if (c in characterQuoteCache) {
     return characterQuoteCache[c];
+  }
   var charCode = c.charCodeAt(0);
   var result;
   if (charCode < 16) result = '\\u000';
@@ -101,15 +102,17 @@ function QuoteJSONString(str) {
 function StackContains(stack, val) {
   var length = stack.length;
   for (var i = 0; i < length; i++) {
-    if (stack[i] === val)
+    if (stack[i] === val) {
       return true;
+    }
   }
   return false;
 }
 function SerializeArray(value, replacer, stack, indent, gap) {
-  if (StackContains(stack, value))
+  if (StackContains(stack, value)) {
     throw MakeTypeError('circular_structure', []);
+  }
   stack.push(value);
   var stepback = indent;
   indent += gap;
@@ -117,9 +120,10 @@ function SerializeArray(value, replacer, stack, indent, gap) {
   var len = value.length;
   for (var i = 0; i < len; i++) {
     var strP = JSONSerialize($String(i), value, replacer, stack,
                              indent, gap);
-    if (IS_UNDEFINED(strP))
+    if (IS_UNDEFINED(strP)) {
       strP = "null";
+    }
     partial.push(strP);
   }
   var final;
@@ -137,8 +141,9 @@ function SerializeArray(value, replacer, stack, indent, gap) {
 }
 function SerializeObject(value, replacer, stack, indent, gap) {
-  if (StackContains(stack, value))
+  if (StackContains(stack, value)) {
     throw MakeTypeError('circular_structure', []);
+  }
   stack.push(value);
   var stepback = indent;
   indent += gap;
@@ -188,17 +193,21 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
   var value = holder[key];
   if (IS_OBJECT(value) && value) {
     var toJSON = value.toJSON;
-    if (IS_FUNCTION(toJSON))
+    if (IS_FUNCTION(toJSON)) {
       value = toJSON.call(value, key);
+    }
   }
-  if (IS_FUNCTION(replacer))
+  if (IS_FUNCTION(replacer)) {
     value = replacer.call(holder, key, value);
+  }
   // Unwrap value if necessary
   if (IS_OBJECT(value)) {
     if (IS_NUMBER_WRAPPER(value)) {
       value = $Number(value);
     } else if (IS_STRING_WRAPPER(value)) {
       value = $String(value);
+    } else if (IS_BOOLEAN_WRAPPER(value)) {
+      value = $Boolean(value);
     }
   }
   switch (typeof value) {
@@ -232,12 +241,17 @@ function JSONStringify(value, replacer, space) {
   }
   var gap;
   if (IS_NUMBER(space)) {
-    space = $Math.min(space, 100);
+    space = $Math.min(space, 10);
     gap = "";
-    for (var i = 0; i < space; i++)
+    for (var i = 0; i < space; i++) {
       gap += " ";
+    }
   } else if (IS_STRING(space)) {
-    gap = space;
+    if (space.length > 10) {
+      gap = space.substring(0, 10);
+    } else {
+      gap = space;
+    }
   } else {
     gap = "";
   }
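
The two JSONStringify changes above align the space argument with the ES5 rule: a numeric space is clamped to at most 10, and a string space is truncated to its first 10 characters. The same rule restated as a standalone C++ sketch for clarity:

    #include <algorithm>
    #include <string>

    // Numeric space: at most ten characters of indentation.
    std::string GapFromNumber(int space) {
      int n = std::min(space, 10);
      return std::string(std::max(n, 0), ' ');
    }

    // String space: only the first ten characters are used.
    std::string GapFromString(const std::string& space) {
      return space.substr(0, 10);
    }

With this change, JSON.stringify(x, null, 100) indents by ten spaces rather than one hundred.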

3
deps/v8/src/jump-target-inl.h

@@ -42,6 +42,9 @@ void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
   } else if (target->is_copy()) {
     entry_frame_->elements_[target->index()].set_copied();
   }
+  if (direction_ == BIDIRECTIONAL) {
+    entry_frame_->elements_[index].set_number_info(NumberInfo::kUnknown);
+  }
 }
 } }  // namespace v8::internal

50
deps/v8/src/jump-target.cc

@@ -101,6 +101,17 @@ void JumpTarget::ComputeEntryFrame() {
         if (element == NULL || !element->is_valid()) break;
         element = element->Combine(&reaching_frames_[j]->elements_[i]);
+        FrameElement* other = &reaching_frames_[j]->elements_[i];
+        if (element != NULL && !element->is_copy()) {
+          ASSERT(other != NULL);
+          ASSERT(!other->is_copy());
+          // We overwrite the number information of one of the incoming frames.
+          // This is safe because we only use the frame for emitting merge code.
+          // The number information of incoming frames is not used anymore.
+          element->set_number_info(NumberInfo::Combine(element->number_info(),
+                                                       other->number_info()));
+        }
       }
       elements[i] = element;
     }
@@ -117,6 +128,7 @@ void JumpTarget::ComputeEntryFrame() {
     // elements as copied exactly when they have a copy. Undetermined
     // elements are initially recorded as if in memory.
     if (target != NULL) {
+      ASSERT(!target->is_copy());  // These initial elements are never copies.
       entry_frame_->elements_[index] = *target;
       InitializeEntryElement(index, target);
     }
@@ -125,7 +137,8 @@ void JumpTarget::ComputeEntryFrame() {
   for (; index < length; index++) {
     FrameElement* target = elements[index];
     if (target == NULL) {
-      entry_frame_->elements_.Add(FrameElement::MemoryElement());
+      entry_frame_->elements_.Add(
+          FrameElement::MemoryElement(NumberInfo::kUninitialized));
     } else {
       entry_frame_->elements_.Add(*target);
       InitializeEntryElement(index, target);
@@ -142,9 +155,20 @@ void JumpTarget::ComputeEntryFrame() {
     RegisterFile candidate_registers;
     int best_count = kMinInt;
     int best_reg_num = RegisterAllocator::kInvalidRegister;
+    NumberInfo::Type info = NumberInfo::kUninitialized;
     for (int j = 0; j < reaching_frames_.length(); j++) {
       FrameElement element = reaching_frames_[j]->elements_[i];
+      if (direction_ == BIDIRECTIONAL) {
+        info = NumberInfo::kUnknown;
+      } else if (!element.is_copy()) {
+        info = NumberInfo::Combine(info, element.number_info());
+      } else {
+        // New elements will not be copies, so get number information from
+        // backing element in the reaching frame.
+        info = NumberInfo::Combine(info,
+            reaching_frames_[j]->elements_[element.index()].number_info());
+      }
       is_synced = is_synced && element.is_synced();
       if (element.is_register() && !entry_frame_->is_used(element.reg())) {
         // Count the register occurrence and remember it if better
@@ -158,11 +182,17 @@ void JumpTarget::ComputeEntryFrame() {
       }
     }
+    // We must have a number type information now (not for copied elements).
+    ASSERT(entry_frame_->elements_[i].is_copy()
+           || info != NumberInfo::kUninitialized);
    // If the value is synced on all frames, put it in memory. This
    // costs nothing at the merge code but will incur a
    // memory-to-register move when the value is needed later.
    if (is_synced) {
      // Already recorded as a memory element.
+      // Set combined number info.
+      entry_frame_->elements_[i].set_number_info(info);
      continue;
    }
@@ -183,13 +213,27 @@ void JumpTarget::ComputeEntryFrame() {
      bool is_copied = entry_frame_->elements_[i].is_copied();
      Register reg = RegisterAllocator::ToRegister(best_reg_num);
      entry_frame_->elements_[i] =
-          FrameElement::RegisterElement(reg,
-                                        FrameElement::NOT_SYNCED);
+          FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED,
+                                        NumberInfo::kUninitialized);
      if (is_copied) entry_frame_->elements_[i].set_copied();
      entry_frame_->set_register_location(reg, i);
    }
+    // Set combined number info.
+    entry_frame_->elements_[i].set_number_info(info);
+  }
+}
+  // If we have incoming backward edges assert we forget all number information.
+#ifdef DEBUG
+  if (direction_ == BIDIRECTIONAL) {
+    for (int i = 0; i < length; ++i) {
+      if (!entry_frame_->elements_[i].is_copy()) {
+        ASSERT(entry_frame_->elements_[i].number_info() ==
+               NumberInfo::kUnknown);
+      }
+    }
   }
   }
+#endif
   // The stack pointer is at the highest synced element or the base of
   // the expression stack.
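
NumberInfo::Combine above merges what the reaching frames know about each frame element, and bidirectional (loop) targets deliberately reset the result to kUnknown, since a back edge may bring a value of a different kind. One plausible reading of the combine step as a standalone lattice join (the enum and the rules here are illustrative, not V8's exact definition):

    namespace sketch {

    enum Type { kUninitialized, kUnknown, kSmi, kHeapNumber };

    // Join two facts: kUninitialized is the identity, agreement is kept,
    // and any disagreement degrades to kUnknown.
    inline Type Combine(Type a, Type b) {
      if (a == kUninitialized) return b;
      if (b == kUninitialized) return a;
      if (a == b) return a;
      return kUnknown;
    }

    }  // namespace sketch

Starting the fold at kUninitialized is what makes the ASSERT above meaningful: once at least one non-copy element has been combined in, the result can no longer be kUninitialized.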

87
deps/v8/src/liveedit.cc

@@ -0,0 +1,87 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "liveedit.h"
#include "compiler.h"
#include "oprofile-agent.h"
#include "scopes.h"
#include "global-handles.h"
#include "debug.h"
namespace v8 {
namespace internal {
class FunctionInfoListener {
public:
void FunctionStarted(FunctionLiteral* fun) {
// Implementation follows.
}
void FunctionDone() {
// Implementation follows.
}
void FunctionScope(Scope* scope) {
// Implementation follows.
}
void FunctionCode(Handle<Code> function_code) {
// Implementation follows.
}
};
static FunctionInfoListener* active_function_info_listener = NULL;
LiveEditFunctionTracker::LiveEditFunctionTracker(FunctionLiteral* fun) {
if (active_function_info_listener != NULL) {
active_function_info_listener->FunctionStarted(fun);
}
}
LiveEditFunctionTracker::~LiveEditFunctionTracker() {
if (active_function_info_listener != NULL) {
active_function_info_listener->FunctionDone();
}
}
void LiveEditFunctionTracker::RecordFunctionCode(Handle<Code> code) {
if (active_function_info_listener != NULL) {
active_function_info_listener->FunctionCode(code);
}
}
void LiveEditFunctionTracker::RecordFunctionScope(Scope* scope) {
if (active_function_info_listener != NULL) {
active_function_info_listener->FunctionScope(scope);
}
}
bool LiveEditFunctionTracker::IsActive() {
return active_function_info_listener != NULL;
}
} } // namespace v8::internal
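
LiveEditFunctionTracker above is an RAII notifier: the constructor reports the start of a function compilation and the destructor reports the end, so an active listener always sees a balanced started/done pair even when compilation exits early. The pattern in isolation, as a self-contained sketch with stand-in types (the real tracker wraps FunctionStarted()/FunctionDone() the same way):

    struct Listener {
      void Started() {}
      void Done() {}
    };

    static Listener* active_listener = 0;  // Null while no live edit runs.

    class ScopedTracker {
     public:
      ScopedTracker() {
        if (active_listener != 0) active_listener->Started();
      }
      ~ScopedTracker() {
        // Runs on every exit path, so Started()/Done() always pair up.
        if (active_listener != 0) active_listener->Done();
      }
    };

    void CompileOneFunction() {
      ScopedTracker tracker;
      // ... generate code; an early return still reports completion ...
    }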

78
deps/v8/src/liveedit.h

@@ -0,0 +1,78 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_LIVEEDIT_H_
#define V8_LIVEEDIT_H_
// Live Edit feature implementation.
// The user should be able to change a script on an already running VM. This
// feature matches the hot-swap features found in other frameworks.
//
// The basic use case: the user spots a mistake in a function body from the
// debugger and wishes to change the algorithm without a restart.
//
// A single change always has the form of a simple replacement (in
// pseudo-code):
//   script.source[positions, positions+length] = new_string;
// The implementation first determines which function's body includes this
// change area. Then both the old and the new version of the script are fully
// compiled in order to analyze whether the function changed its outer scope
// expectations (or its number of parameters). If it did not, the function's
// code is patched with the newly compiled code. If it did change, the
// enclosing function gets patched instead. All inner functions are left
// untouched, whatever happened to them in the new script version; however,
// the new version of the code will instantiate newly compiled functions.
#include "compiler.h"
namespace v8 {
namespace internal {
// This class collects some specific information on the structure of
// functions in a particular script. It gets called from the compiler all the
// time, but it only actually records data while a live edit operation is in
// progress; at any other time the class is very cheap.
//
// The primary interest of the Tracker is to record function scope structures
// in order to analyze whether function code may be safely patched (with the
// new code successfully reading existing data from function scopes). The
// Tracker also collects compiled function codes.
class LiveEditFunctionTracker {
public:
explicit LiveEditFunctionTracker(FunctionLiteral* fun);
~LiveEditFunctionTracker();
void RecordFunctionCode(Handle<Code> code);
void RecordFunctionScope(Scope* scope);
static bool IsActive();
};
} } // namespace v8::internal
#endif  /* V8_LIVEEDIT_H_ */
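
The header comment above boils down to one decision: the edited function's code can be swapped in place only if its outer scope expectations and parameter count are unchanged; otherwise the enclosing function must be patched. Restated as a sketch (every name here is hypothetical; the actual logic lands in later commits):

    struct FunctionVersion {
      int parameter_count;
      int outer_scope_shape;  // Stand-in for "outer scope expectations".
    };

    // True if the new code can replace the old function's code directly;
    // false means the caller should patch the enclosing function instead.
    bool CanPatchInPlace(const FunctionVersion& before,
                         const FunctionVersion& after) {
      return before.parameter_count == after.parameter_count &&
             before.outer_scope_shape == after.outer_scope_shape;
    }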

9
deps/v8/src/log-utils.cc

@@ -351,15 +351,6 @@ void LogMessageBuilder::WriteToLogFile() {
 }
-void LogMessageBuilder::WriteCStringToLogFile(const char* str) {
-  const int len = StrLength(str);
-  const int written = Log::Write(str, len);
-  if (written != len && write_failure_handler != NULL) {
-    write_failure_handler();
-  }
-}
 // Formatting string for back references to the whole line. E.g. "#2" means
 // "the second line above".
 const char* LogRecordCompressor::kLineBackwardReferenceFormat = "#%d";

3
deps/v8/src/log-utils.h

@@ -268,9 +268,6 @@ class LogMessageBuilder BASE_EMBEDDED {
   // Write the log message to the log file currently opened.
   void WriteToLogFile();
-  // Write a null-terminated string to to the log file currently opened.
-  void WriteCStringToLogFile(const char* str);
   // A handler that is called when Log::Write fails.
   typedef void (*WriteFailureHandler)();

98
deps/v8/src/log.cc

@@ -330,6 +330,8 @@ SlidingStateWindow* Logger::sliding_state_window_ = NULL;
 const char** Logger::log_events_ = NULL;
 CompressionHelper* Logger::compression_helper_ = NULL;
 bool Logger::is_logging_ = false;
+int Logger::cpu_profiler_nesting_ = 0;
+int Logger::heap_profiler_nesting_ = 0;
 #define DECLARE_LONG_EVENT(ignore1, long_name, ignore2) long_name,
 const char* kLongLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
@@ -368,15 +370,6 @@ void Logger::LogAliases() {
 #endif  // ENABLE_LOGGING_AND_PROFILING
-void Logger::Preamble(const char* content) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::IsEnabled() || !FLAG_log_code) return;
-  LogMessageBuilder msg;
-  msg.WriteCStringToLogFile(content);
-#endif
-}
 void Logger::StringEvent(const char* name, const char* value) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log) UncheckedStringEvent(name, value);
@@ -1164,53 +1157,61 @@ int Logger::GetActiveProfilerModules() {
 }
-void Logger::PauseProfiler(int flags) {
+void Logger::PauseProfiler(int flags, int tag) {
   if (!Log::IsEnabled()) return;
-  const int active_modules = GetActiveProfilerModules();
-  const int modules_to_disable = active_modules & flags;
-  if (modules_to_disable == PROFILER_MODULE_NONE) return;
-  if (modules_to_disable & PROFILER_MODULE_CPU) {
-    profiler_->pause();
-    if (FLAG_prof_lazy) {
-      if (!FLAG_sliding_state_window) ticker_->Stop();
-      FLAG_log_code = false;
-      // Must be the same message as Log::kDynamicBufferSeal.
-      LOG(UncheckedStringEvent("profiler", "pause"));
+  if (flags & PROFILER_MODULE_CPU) {
+    // It is OK to have negative nesting.
+    if (--cpu_profiler_nesting_ == 0) {
+      profiler_->pause();
+      if (FLAG_prof_lazy) {
+        if (!FLAG_sliding_state_window) ticker_->Stop();
+        FLAG_log_code = false;
+        // Must be the same message as Log::kDynamicBufferSeal.
+        LOG(UncheckedStringEvent("profiler", "pause"));
+      }
     }
   }
-  if (modules_to_disable &
+  if (flags &
       (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
-    FLAG_log_gc = false;
+    if (--heap_profiler_nesting_ == 0) {
+      FLAG_log_gc = false;
+    }
   }
-  // Turn off logging if no active modules remain.
-  if ((active_modules & ~flags) == PROFILER_MODULE_NONE) {
+  if (tag != 0) {
+    IntEvent("close-tag", tag);
+  }
+  if (GetActiveProfilerModules() == PROFILER_MODULE_NONE) {
     is_logging_ = false;
   }
 }
-void Logger::ResumeProfiler(int flags) {
+void Logger::ResumeProfiler(int flags, int tag) {
   if (!Log::IsEnabled()) return;
-  const int modules_to_enable = ~GetActiveProfilerModules() & flags;
-  if (modules_to_enable != PROFILER_MODULE_NONE) {
-    is_logging_ = true;
+  if (tag != 0) {
+    IntEvent("open-tag", tag);
   }
-  if (modules_to_enable & PROFILER_MODULE_CPU) {
-    if (FLAG_prof_lazy) {
-      profiler_->Engage();
-      LOG(UncheckedStringEvent("profiler", "resume"));
-      FLAG_log_code = true;
-      LogCompiledFunctions();
-      LogFunctionObjects();
-      LogAccessorCallbacks();
-      if (!FLAG_sliding_state_window) ticker_->Start();
+  if (flags & PROFILER_MODULE_CPU) {
+    if (cpu_profiler_nesting_++ == 0) {
+      is_logging_ = true;
+      if (FLAG_prof_lazy) {
+        profiler_->Engage();
+        LOG(UncheckedStringEvent("profiler", "resume"));
+        FLAG_log_code = true;
+        LogCompiledFunctions();
+        LogFunctionObjects();
+        LogAccessorCallbacks();
+        if (!FLAG_sliding_state_window) ticker_->Start();
+      }
+      profiler_->resume();
     }
-    profiler_->resume();
   }
-  if (modules_to_enable &
+  if (flags &
      (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
-    FLAG_log_gc = true;
+    if (heap_profiler_nesting_++ == 0) {
+      is_logging_ = true;
+      FLAG_log_gc = true;
+    }
  }
 }
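
The rewrite above replaces the module-mask bookkeeping with per-module nesting counters: only the 0 -> 1 transition engages a profiler module and only the 1 -> 0 transition pauses it, so paired Resume/Pause calls from independent clients compose, and a stray Pause merely drives the counter negative (as the "OK to have negative nesting" comment notes). The discipline in isolation, as a standalone sketch with hypothetical names:

    class NestedProfilerSwitch {
     public:
      NestedProfilerSwitch() : nesting_(0), running_(false) {}
      void Resume() {
        if (nesting_++ == 0) running_ = true;   // First client starts it.
      }
      void Pause() {
        if (--nesting_ == 0) running_ = false;  // Last client stops it.
      }
      bool running() const { return running_; }
     private:
      int nesting_;
      bool running_;
    };

Two overlapping Resume/Pause pairs now leave the switch exactly where it started, which the old flag-mask logic could not guarantee.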
@@ -1219,7 +1220,7 @@ void Logger::ResumeProfiler(int flags) {
 // either from main or Profiler's thread.
 void Logger::StopLoggingAndProfiling() {
   Log::stop();
-  PauseProfiler(PROFILER_MODULE_CPU);
+  PauseProfiler(PROFILER_MODULE_CPU, 0);
 }
@@ -1261,7 +1262,9 @@ void Logger::LogCodeObject(Object* object) {
     case Code::FUNCTION:
       return;  // We log this later using LogCompiledFunctions.
     case Code::STUB:
-      description = CodeStub::MajorName(code_object->major_key());
+      description = CodeStub::MajorName(code_object->major_key(), true);
+      if (description == NULL)
+        description = "A stub from the snapshot";
       tag = Logger::STUB_TAG;
       break;
     case Code::BUILTIN:
@@ -1294,6 +1297,15 @@ void Logger::LogCodeObject(Object* object) {
 }
+void Logger::LogCodeObjects() {
+  AssertNoAllocation no_alloc;
+  HeapIterator iterator;
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+    if (obj->IsCode()) LogCodeObject(obj);
+  }
+}
 void Logger::LogCompiledFunctions() {
   HandleScope scope;
   const int compiled_funcs_count = EnumerateCompiledFunctions(NULL);

17
deps/v8/src/log.h

@@ -161,12 +161,6 @@ class Logger {
   // Enable the computation of a sliding window of states.
   static void EnableSlidingStateWindow();
-  // Write a raw string to the log to be used as a preamble.
-  // No check is made that the 'preamble' is actually at the beginning
-  // of the log. The preample is used to write code events saved in the
-  // snapshot.
-  static void Preamble(const char* content);
   // Emits an event with a string value -> (name, value).
   static void StringEvent(const char* name, const char* value);
@@ -277,8 +271,8 @@ class Logger {
   // Pause/Resume collection of profiling data.
   // When data collection is paused, CPU Tick events are discarded until
   // data collection is Resumed.
-  static void PauseProfiler(int flags);
-  static void ResumeProfiler(int flags);
+  static void PauseProfiler(int flags, int tag);
+  static void ResumeProfiler(int flags, int tag);
   static int GetActiveProfilerModules();
   // If logging is performed into a memory buffer, allows to
@@ -292,7 +286,7 @@ class Logger {
   // Logs all accessor callbacks found in the heap.
   static void LogAccessorCallbacks();
   // Used for logging stubs found in the snapshot.
-  static void LogCodeObject(Object* code_object);
+  static void LogCodeObjects();
  private:
@@ -325,6 +319,9 @@ class Logger {
   // Emits the source code of a regexp. Used by regexp events.
   static void LogRegExpSource(Handle<JSRegExp> regexp);
+  // Used for logging stubs found in the snapshot.
+  static void LogCodeObject(Object* code_object);
   // Emits a profiler tick event. Used by the profiler thread.
   static void TickEvent(TickSample* sample, bool overflow);
@@ -376,6 +373,8 @@ class Logger {
   friend class LoggerTestHelper;
   static bool is_logging_;
+  static int cpu_profiler_nesting_;
+  static int heap_profiler_nesting_;
 #else
   static bool is_logging() { return false; }
 #endif

9
deps/v8/src/macro-assembler.h

@@ -61,6 +61,8 @@ enum AllocationFlags {
   RESULT_CONTAINS_TOP = 1 << 1
 };
+// Invalid depth in prototype chain.
+const int kInvalidProtoDepth = -1;
 #if V8_TARGET_ARCH_IA32
 #include "assembler.h"
@@ -86,6 +88,13 @@ enum AllocationFlags {
 #endif
 #include "code.h"  // must be after assembler_*.h
 #include "arm/macro-assembler-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/constants-mips.h"
+#include "assembler.h"
+#include "mips/assembler-mips.h"
+#include "mips/assembler-mips-inl.h"
+#include "code.h"  // must be after assembler_*.h
+#include "mips/macro-assembler-mips.h"
 #else
 #error Unsupported target architecture.
 #endif

2
deps/v8/src/math.js

@@ -233,7 +233,7 @@ function SetupMath() {
       "SQRT2",
       1.4142135623730951,
       DONT_ENUM | DONT_DELETE | READ_ONLY);
-  %TransformToFastProperties($Math);
+  %ToFastProperties($Math);
   // Setup non-enumerable functions of the Math object and
   // set their names.

2
deps/v8/src/messages.js

@@ -162,6 +162,8 @@ function FormatMessage(message) {
       value_and_accessor: "Invalid property. A property cannot both have accessors and be writable or have a value: %0",
       proto_object_or_null: "Object prototype may only be an Object or null",
       property_desc_object: "Property description must be an object: %0",
+      redefine_disallowed: "Cannot redefine property: %0",
+      define_disallowed: "Cannot define property, object is not extensible: %0",
       // RangeError
       invalid_array_length: "Invalid array length",
       stack_overflow: "Maximum call stack size exceeded",

Some files were not shown because too many files changed in this diff
