Date: Sat, 13 Mar 2010 12:47:41 -0800
Subject: [PATCH 04/18] FreeBSD->Solaris on homepage
---
doc/index.html | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/doc/index.html b/doc/index.html
index f7fefc14a4..431dabbe2f 100644
--- a/doc/index.html
+++ b/doc/index.html
@@ -105,7 +105,7 @@ server.listen(7000, "localhost");
Node eventually wants to support all POSIX operating systems
(including Windows with MinGW) but at the moment it is only being
- tested on Linux , Macintosh , and FreeBSD . The
+ tested on Linux , Macintosh , and Solaris . The
build system requires Python 2.4 or better. V8, on which Node is
built, supports only IA-32 and ARM processors. V8 is included in the
Node distribution. To use TLS, GnuTLS and libgpg-error are required.
From 49d30c6478df14ca41540ee7c8d8d9279f2a3651 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Herbert=20Voj=C4=8D=C3=ADk?=
Date: Sat, 13 Mar 2010 15:35:09 -0700
Subject: [PATCH 05/18] Only Ref in idle watcher when it wasn't already active.
---
src/node_idle_watcher.cc | 13 ++++++++-----
src/node_idle_watcher.h | 1 +
2 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/src/node_idle_watcher.cc b/src/node_idle_watcher.cc
index fc09953e69..031f2e0b14 100644
--- a/src/node_idle_watcher.cc
+++ b/src/node_idle_watcher.cc
@@ -88,14 +88,17 @@ Handle IdleWatcher::New(const Arguments& args) {
Handle IdleWatcher::Start(const Arguments& args) {
HandleScope scope;
-
IdleWatcher *idle = ObjectWrap::Unwrap(args.Holder());
+ idle->Start();
+ return Undefined();
+}
- ev_idle_start(EV_DEFAULT_UC_ &idle->watcher_);
-
- idle->Ref();
- return Undefined();
+void IdleWatcher::Start () {
+ if (!watcher_.active) {
+ ev_idle_start(EV_DEFAULT_UC_ &watcher_);
+ Ref();
+ }
}
diff --git a/src/node_idle_watcher.h b/src/node_idle_watcher.h
index 99cbf7601f..42298257cd 100644
--- a/src/node_idle_watcher.h
+++ b/src/node_idle_watcher.h
@@ -31,6 +31,7 @@ class IdleWatcher : ObjectWrap {
private:
static void Callback(EV_P_ ev_idle *watcher, int revents);
+ void Start();
void Stop();
ev_idle watcher_;
From 004faf384690aa67cfd377e6ebcfd1667d5f2489 Mon Sep 17 00:00:00 2001
From: Jed Schmidt
Date: Sun, 14 Mar 2010 12:36:45 +0900
Subject: [PATCH 06/18] Added check to make sure writeHead() is called before
write(), to prevent silent failure.
---
lib/http.js | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/lib/http.js b/lib/http.js
index ca196bc176..6b7d1f691c 100644
--- a/lib/http.js
+++ b/lib/http.js
@@ -110,6 +110,7 @@ function OutgoingMessage (connection) {
this.use_chunked_encoding_by_default = true;
this.flushing = false;
+ this.headWritten = false;
this.finished = false;
}
@@ -215,6 +216,10 @@ OutgoingMessage.prototype.sendBody = function () {
OutgoingMessage.prototype.write = function (chunk, encoding) {
+ if ( (this instanceof ServerResponse) && !this.headWritten) {
+ throw new Error("writeHead() must be called before write()")
+ }
+
encoding = encoding || "ascii";
if (this.chunked_encoding) {
this._send(process._byteLength(chunk, encoding).toString(16));
@@ -279,6 +284,7 @@ ServerResponse.prototype.writeHead = function (statusCode) {
var status_line = "HTTP/1.1 " + statusCode.toString() + " "
+ reasonPhrase + CRLF;
this.sendHeaderLines(status_line, headers);
+ this.headWritten = true;
};
// TODO eventually remove sendHeader(), writeHeader()
From 0d5a1fed33134d05911a153ade2bf8ae8710fc57 Mon Sep 17 00:00:00 2001
From: Krishna Rajendran
Date: Sun, 14 Mar 2010 23:24:29 -0400
Subject: [PATCH 07/18] Use gai_strerror when tcp.Server fails to resolve its
address or service.
---
src/node_net.cc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/node_net.cc b/src/node_net.cc
index bc0f67315b..2c5b8de32c 100644
--- a/src/node_net.cc
+++ b/src/node_net.cc
@@ -837,7 +837,7 @@ Handle Server::Listen(const Arguments& args) {
host : NULL, *port, &server_tcp_hints, &address_list);
if (r != 0) {
- Local exception = Exception::Error(String::New(strerror(errno)));
+ Local exception = Exception::Error(String::New(gai_strerror(r)));
return ThrowException(exception);
}
From 4ccdc501d48a0f198a040ee46f4dab27a611a489 Mon Sep 17 00:00:00 2001
From: Ryan Dahl
Date: Mon, 15 Mar 2010 08:00:19 -0700
Subject: [PATCH 08/18] Include lib/ directory in node executable. Compile on
demand.
Instead of installing the files in /usr/lib/node/libraries and loading them
from the file system, the files are built-in to the node executable.
However, they are only compiled on demand.
The reasoning is:
1. Allow for more complex internal javascript. In particular,
process.stdout and process.stdin can be js implemented streams.
2. Ease system installs. Loading from disk each time is unnecessary
overhead. Note that there is no "system" path for modules anymore. Only
$HOME/.node_libraries.
---
doc/api.txt | 27 ++++-----------
lib/assert.js | 6 ++--
lib/fs.js | 2 +-
lib/http.js | 2 +-
src/node.cc | 21 +++++++++++-
src/node.js | 92 +++++++++++++++++++++++++++------------------------
tools/js2c.py | 19 +++++++++--
wscript | 8 ++---
8 files changed, 100 insertions(+), 77 deletions(-)
diff --git a/doc/api.txt b/doc/api.txt
index 2bdd10bcb6..def050430b 100644
--- a/doc/api.txt
+++ b/doc/api.txt
@@ -337,33 +337,20 @@ A module prefixed with +"./"+ is relative to the file calling +require()+.
That is, +circle.js+ must be in the same directory as +foo.js+ for
+require("./circle")+ to find it.
-Without the leading +"./"+, like +require("mjsunit")+ the module is searched
+Without the leading +"./"+, like +require("assert")+ the module is searched
for in the +require.paths+ array. +require.paths+ on my system looks like
this:
----------------------------------------
-[ "/home/ryan/.node_libraries"
-, "/usr/local/lib/node/libraries"
-]
+[ "/home/ryan/.node_libraries" ]
----------------------------------------
-That is, when +require("mjsunit")+ is called Node looks for
+That is, when +require("assert")+ is called Node looks for
- 1. +"/home/ryan/.node_libraries/mjsunit.js"+
-
- 2. +"/home/ryan/.node_libraries/mjsunit.node"+
-
- 3. +"/home/ryan/.node_libraries/mjsunit/index.js"+
-
- 4. +"/home/ryan/.node_libraries/mjsunit/index.node"+
-
- 5. +"/usr/local/lib/node/libraries/mjsunit.js"+
-
- 6. +"/usr/local/lib/node/libraries/mjsunit.node"+
-
- 7. +"/usr/local/lib/node/libraries/mjsunit/index.js"+
-
- 8. +"/usr/local/lib/node/libraries/mjsunit/index.node"+
+ 1. +"/home/ryan/.node_libraries/assert.js"+
+ 2. +"/home/ryan/.node_libraries/assert.node"+
+ 3. +"/home/ryan/.node_libraries/assert/index.js"+
+ 4. +"/home/ryan/.node_libraries/assert/index.node"+
interrupting once a file is found. Files ending in +".node"+ are binary Addon
Modules; see the section below about addons. +"index.js"+ allows one to
diff --git a/lib/assert.js b/lib/assert.js
index 0a8c19715e..6dc06213cc 100644
--- a/lib/assert.js
+++ b/lib/assert.js
@@ -6,7 +6,7 @@
// Copyright (c) 2009 Thomas Robinson <280north.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the “Software”), to
+// of this software and associated documentation files (the 'Software'), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
@@ -15,7 +15,7 @@
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
-// THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
@@ -23,7 +23,7 @@
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// UTILITY
-var inherits = require('./sys').inherits;
+var inherits = require('sys').inherits;
var pSlice = Array.prototype.slice;
// 1. The assert module provides functions that throw
diff --git a/lib/fs.js b/lib/fs.js
index f9eae9bd39..63fb166501 100644
--- a/lib/fs.js
+++ b/lib/fs.js
@@ -1,4 +1,4 @@
-var sys = require('./sys'),
+var sys = require('sys'),
events = require('events');
var fs = exports;
diff --git a/lib/http.js b/lib/http.js
index 6b7d1f691c..624e4573df 100644
--- a/lib/http.js
+++ b/lib/http.js
@@ -1,4 +1,4 @@
-var sys = require('./sys');
+var sys = require('sys');
var events = require('events');
var CRLF = "\r\n";
diff --git a/src/node.cc b/src/node.cc
index 8cb5e64ed8..b5736c6803 100644
--- a/src/node.cc
+++ b/src/node.cc
@@ -1156,6 +1156,26 @@ static void Load(int argc, char *argv[]) {
HTTPConnection::Initialize(http); // http.cc
+ Local natives = Object::New();
+ process->Set(String::New("natives"), natives);
+ // Explicitly define native sources.
+ natives->Set(String::New("assert"), String::New(native_assert));
+ natives->Set(String::New("dns"), String::New(native_dns));
+ natives->Set(String::New("file"), String::New(native_file));
+ natives->Set(String::New("fs"), String::New(native_fs));
+ natives->Set(String::New("http"), String::New(native_http));
+ natives->Set(String::New("ini"), String::New(native_ini));
+ natives->Set(String::New("mjsunit"), String::New(native_mjsunit));
+ natives->Set(String::New("multipart"), String::New(native_multipart));
+ natives->Set(String::New("posix"), String::New(native_posix));
+ natives->Set(String::New("querystring"), String::New(native_querystring));
+ natives->Set(String::New("repl"), String::New(native_repl));
+ natives->Set(String::New("sys"), String::New(native_sys));
+ natives->Set(String::New("tcp"), String::New(native_tcp));
+ natives->Set(String::New("uri"), String::New(native_uri));
+ natives->Set(String::New("url"), String::New(native_url));
+ natives->Set(String::New("utils"), String::New(native_utils));
+
// Compile, execute the src/node.js file. (Which was included as static C
// string in node_natives.h. 'natve_node' is the string containing that
@@ -1262,7 +1282,6 @@ static void ParseArgs(int *argc, char **argv) {
exit(0);
} else if (strcmp(arg, "--vars") == 0) {
printf("NODE_PREFIX: %s\n", NODE_PREFIX);
- printf("NODE_LIBRARIES_PREFIX: %s/%s\n", NODE_PREFIX, "lib/node/libraries");
printf("NODE_CFLAGS: %s\n", NODE_CFLAGS);
exit(0);
} else if (strcmp(arg, "--help") == 0 || strcmp(arg, "-h") == 0) {
diff --git a/src/node.js b/src/node.js
index 0d08a70009..858cd91a94 100644
--- a/src/node.js
+++ b/src/node.js
@@ -461,8 +461,7 @@ function existsSync (path) {
-process.paths = [ path.join(process.installPrefix, "lib/node/libraries")
- ];
+process.paths = [];
if (process.env["HOME"]) {
process.paths.unshift(path.join(process.env["HOME"], ".node_libraries"));
@@ -553,6 +552,8 @@ function resolveModulePath(request, parent) {
var id, paths;
if (request.charAt(0) == "." && (request.charAt(1) == "/" || request.charAt(1) == ".")) {
// Relative request
+ debug("RELATIVE: requested:" + request + " set ID to: "+id+" from "+parent.id);
+
var exts = ['js', 'node'], ext;
for (ext in extensionCache) {
exts.push(ext.slice(1));
@@ -561,7 +562,6 @@ function resolveModulePath(request, parent) {
var parentIdPath = path.dirname(parent.id +
(path.basename(parent.filename).match(new RegExp('^index\\.(' + exts.join('|') + ')$')) ? "/" : ""));
id = path.join(parentIdPath, request);
- // debug("RELATIVE: requested:"+request+" set ID to: "+id+" from "+parent.id+"("+parentIdPath+")");
paths = [path.dirname(parent.filename)];
} else {
id = request;
@@ -573,56 +573,61 @@ function resolveModulePath(request, parent) {
}
-function loadModuleSync (request, parent) {
- var resolvedModule = resolveModulePath(request, parent);
- var id = resolvedModule[0];
- var paths = resolvedModule[1];
+function loadModule (request, parent, callback) {
+ var resolvedModule = resolveModulePath(request, parent),
+ id = resolvedModule[0],
+ paths = resolvedModule[1];
- debug("loadModuleSync REQUEST " + (request) + " parent: " + parent.id);
+ debug("loadModule REQUEST " + (request) + " parent: " + parent.id);
var cachedModule = internalModuleCache[id] || parent.moduleCache[id];
- if (cachedModule) {
- debug("found " + JSON.stringify(id) + " in cache");
- return cachedModule.exports;
- } else {
- debug("looking for " + JSON.stringify(id) + " in " + JSON.stringify(paths));
- var filename = findModulePath(request, paths);
- if (!filename) {
- throw new Error("Cannot find module '" + request + "'");
- } else {
- var module = new Module(id, parent);
- module.loadSync(filename);
- return module.exports;
+ if (!cachedModule) {
+ // Try to compile from native modules
+ if (process.natives[id]) {
+ debug('load native module ' + id);
+ cachedModule = new Module(id);
+ var e = cachedModule._compile(process.natives[id], id);
+ if (e) throw e;
+ internalModuleCache[id] = cachedModule;
}
}
-}
-
-
-function loadModule (request, parent, callback) {
- var
- resolvedModule = resolveModulePath(request, parent),
- id = resolvedModule[0],
- paths = resolvedModule[1];
- debug("loadModule REQUEST " + (request) + " parent: " + parent.id);
-
- var cachedModule = internalModuleCache[id] || parent.moduleCache[id];
if (cachedModule) {
debug("found " + JSON.stringify(id) + " in cache");
- if (callback) callback(null, cachedModule.exports);
- } else {
- debug("looking for " + JSON.stringify(id) + " in " + JSON.stringify(paths));
+ if (callback) {
+ callback(null, cachedModule.exports);
+ } else {
+ return cachedModule.exports;
+ }
+
+ } else {
// Not in cache
- findModulePath(request, paths, function (filename) {
+ debug("looking for " + JSON.stringify(id) + " in " + JSON.stringify(paths));
+
+ if (!callback) {
+ // sync
+ var filename = findModulePath(request, paths);
if (!filename) {
- var err = new Error("Cannot find module '" + request + "'");
- if (callback) callback(err);
+ throw new Error("Cannot find module '" + request + "'");
} else {
var module = new Module(id, parent);
- module.load(filename, callback);
+ module.loadSync(filename);
+ return module.exports;
}
- });
+
+ } else {
+ // async
+ findModulePath(request, paths, function (filename) {
+ if (!filename) {
+ var err = new Error("Cannot find module '" + request + "'");
+ callback(err);
+ } else {
+ var module = new Module(id, parent);
+ module.load(filename, callback);
+ }
+ });
+ }
}
};
@@ -713,7 +718,8 @@ function cat (id, callback) {
}
-Module.prototype._loadContent = function (content, filename) {
+// Returns exception if any
+Module.prototype._compile = function (content, filename) {
var self = this;
// remove shebang
content = content.replace(/^\#\!.*/, '');
@@ -729,7 +735,7 @@ Module.prototype._loadContent = function (content, filename) {
}
function require (path) {
- return loadModuleSync(path, self);
+ return loadModule(path, self);
}
require.paths = process.paths;
@@ -765,7 +771,7 @@ Module.prototype._loadScriptSync = function (filename) {
// remove shebang
content = content.replace(/^\#\!.*/, '');
- var e = this._loadContent(content, filename);
+ var e = this._compile(content, filename);
if (e) {
throw e;
} else {
@@ -781,7 +787,7 @@ Module.prototype._loadScript = function (filename, callback) {
if (err) {
if (callback) callback(err);
} else {
- var e = self._loadContent(content, filename);
+ var e = self._compile(content, filename);
if (e) {
if (callback) callback(e);
} else {
diff --git a/tools/js2c.py b/tools/js2c.py
index fb38ece99b..64e5c2b625 100755
--- a/tools/js2c.py
+++ b/tools/js2c.py
@@ -35,11 +35,22 @@ import os, re, sys, string
import jsmin
-def ToCArray(lines):
+def ToCArray(filename, lines):
result = []
+ row = 1
+ col = 0
for chr in lines:
+ col += 1
+ if chr == "\n" or chr == "\r":
+ row += 1
+ col = 0
+
value = ord(chr)
- assert value < 128
+
+ if value > 128:
+ print 'non-ascii value ' + filename + ':' + str(row) + ':' + str(col)
+ sys.exit(1);
+
result.append(str(value))
result.append("0")
return ", ".join(result)
@@ -231,6 +242,7 @@ def JS2C(source, target):
# Locate the macros file name.
consts = {}
macros = {}
+
for s in source:
if 'macros.py' == (os.path.split(str(s))[1]):
(consts, macros) = ReadMacros(ReadLines(str(s)))
@@ -244,10 +256,11 @@ def JS2C(source, target):
delay = str(s).endswith('-delay.js')
lines = ReadFile(str(s))
do_jsmin = lines.find('// jsminify this file, js2c: jsmin') != -1
+
lines = ExpandConstants(lines, consts)
lines = ExpandMacros(lines, macros)
lines = CompressScript(lines, do_jsmin)
- data = ToCArray(lines)
+ data = ToCArray(s, lines)
id = (os.path.split(str(s))[1])[:-3]
if delay: id = id[:-6]
if delay:
diff --git a/wscript b/wscript
index 6310451ec9..6a8392b512 100644
--- a/wscript
+++ b/wscript
@@ -344,11 +344,11 @@ def build(bld):
js2c.JS2C(source, targets)
native_cc = bld.new_task_gen(
- source='src/node.js',
+ source='src/node.js ' + bld.path.ant_glob('lib/*.js'),
target="src/node_natives.h",
- before="cxx"
+ before="cxx",
+ install_path=None
)
- native_cc.install_path = None
# Add the rule /after/ cloning the debug
# This is a work around for an error had in python 2.4.3 (I'll paste the
@@ -458,8 +458,6 @@ def build(bld):
bld.install_files('${PREFIX}/lib/node/wafadmin', 'tools/wafadmin/*.py')
bld.install_files('${PREFIX}/lib/node/wafadmin/Tools', 'tools/wafadmin/Tools/*.py')
- bld.install_files('${PREFIX}/lib/node/libraries/', 'lib/*.js')
-
def shutdown():
Options.options.debug
# HACK to get binding.node out of build directory.
From 1395cba6cec0fb9179b39c12274e13e66e935130 Mon Sep 17 00:00:00 2001
From: Ryan Dahl
Date: Mon, 15 Mar 2010 08:07:33 -0700
Subject: [PATCH 09/18] Remove require.paths from process object
---
src/node.js | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/src/node.js b/src/node.js
index 858cd91a94..e2bf8b8aab 100644
--- a/src/node.js
+++ b/src/node.js
@@ -461,14 +461,14 @@ function existsSync (path) {
-process.paths = [];
+var modulePaths = [];
if (process.env["HOME"]) {
- process.paths.unshift(path.join(process.env["HOME"], ".node_libraries"));
+ modulePaths.unshift(path.join(process.env["HOME"], ".node_libraries"));
}
if (process.env["NODE_PATH"]) {
- process.paths = process.env["NODE_PATH"].split(":").concat(process.paths);
+ modulePaths = process.env["NODE_PATH"].split(":").concat(modulePaths);
}
@@ -566,7 +566,7 @@ function resolveModulePath(request, parent) {
} else {
id = request;
// debug("ABSOLUTE: id="+id);
- paths = process.paths;
+ paths = modulePaths;
}
return [id, paths];
@@ -738,7 +738,7 @@ Module.prototype._compile = function (content, filename) {
return loadModule(path, self);
}
- require.paths = process.paths;
+ require.paths = modulePaths;
require.async = requireAsync;
require.main = process.mainModule;
require.registerExtension = registerExtension;
From a6f904cab79c6d1f8dc93a877412695b8e3bc43d Mon Sep 17 00:00:00 2001
From: Ryan Dahl
Date: Mon, 15 Mar 2010 08:14:51 -0700
Subject: [PATCH 10/18] Tests should use compiled-in modules not build-dir
modules
---
test/common.js | 2 --
1 file changed, 2 deletions(-)
diff --git a/test/common.js b/test/common.js
index ec614564e2..0f8c1be52e 100644
--- a/test/common.js
+++ b/test/common.js
@@ -7,8 +7,6 @@ exports.fixturesDir = path.join(exports.testDir, "fixtures");
exports.libDir = path.join(exports.testDir, "../lib");
exports.PORT = 12346;
-require.paths.unshift(exports.libDir);
-
var sys = require("sys");
for (var i in sys) exports[i] = sys[i];
exports.assert = require('assert');
From d96c52694a56e10f2ba5db239680cb6a0af02120 Mon Sep 17 00:00:00 2001
From: Ryan Dahl
Date: Mon, 15 Mar 2010 08:18:45 -0700
Subject: [PATCH 11/18] Upgrade V8 to 2.1.4
---
deps/v8/ChangeLog | 7 +
deps/v8/include/v8.h | 8 +
deps/v8/src/SConscript | 1 +
deps/v8/src/api.cc | 1 +
deps/v8/src/arm/codegen-arm.cc | 18 +-
deps/v8/src/arm/codegen-arm.h | 13 +-
deps/v8/src/arm/full-codegen-arm.cc | 15 +-
deps/v8/src/arm/macro-assembler-arm.cc | 6 +-
deps/v8/src/arm/stub-cache-arm.cc | 117 +-
deps/v8/src/arm/virtual-frame-arm.h | 2 +
deps/v8/src/array.js | 81 +-
deps/v8/src/ast.cc | 107 +-
deps/v8/src/ast.h | 111 +-
deps/v8/src/bootstrapper.cc | 70 +-
deps/v8/src/builtins.cc | 99 +-
deps/v8/src/cached_powers.h | 119 +
deps/v8/src/checks.h | 22 +
deps/v8/src/codegen.cc | 48 +-
deps/v8/src/codegen.h | 30 +
deps/v8/src/compilation-cache.cc | 4 +-
deps/v8/src/compiler.cc | 44 +-
deps/v8/src/contexts.h | 8 -
deps/v8/src/conversions.cc | 16 +-
deps/v8/src/data-flow.cc | 1054 +-
deps/v8/src/data-flow.h | 305 +-
deps/v8/src/date-delay.js | 101 +-
deps/v8/src/debug.cc | 2 +-
deps/v8/src/diy_fp.h | 136 +
deps/v8/src/double.h | 169 +
deps/v8/src/factory.cc | 1 +
deps/v8/src/factory.h | 4 +-
deps/v8/src/globals.h | 41 +-
deps/v8/src/grisu3.cc | 494 +
deps/v8/src/grisu3.h | 55 +
deps/v8/src/heap.cc | 28 +-
deps/v8/src/ia32/assembler-ia32.cc | 16 +
deps/v8/src/ia32/assembler-ia32.h | 3 +-
deps/v8/src/ia32/codegen-ia32.cc | 223 +-
deps/v8/src/ia32/codegen-ia32.h | 49 +-
deps/v8/src/ia32/disasm-ia32.cc | 59 +-
deps/v8/src/ia32/full-codegen-ia32.cc | 5 +-
deps/v8/src/ia32/ic-ia32.cc | 131 +-
deps/v8/src/ia32/macro-assembler-ia32.cc | 119 +-
deps/v8/src/ia32/macro-assembler-ia32.h | 15 +
deps/v8/src/ia32/stub-cache-ia32.cc | 217 +-
deps/v8/src/ia32/virtual-frame-ia32.cc | 14 +-
deps/v8/src/ia32/virtual-frame-ia32.h | 3 +
deps/v8/src/ic.cc | 13 +-
deps/v8/src/log-utils.cc | 3 +
deps/v8/src/log-utils.h | 2 +-
deps/v8/src/log.cc | 5 +-
deps/v8/src/macros.py | 6 +-
deps/v8/src/math.js | 11 +-
deps/v8/src/messages.js | 3 +-
deps/v8/src/mips/codegen-mips-inl.h | 10 -
deps/v8/src/mips/codegen-mips.cc | 18 +-
deps/v8/src/mips/codegen-mips.h | 19 +-
deps/v8/src/mips/simulator-mips.cc | 8 +-
deps/v8/src/mips/virtual-frame-mips.h | 6 +-
deps/v8/src/objects-debug.cc | 2 +-
deps/v8/src/objects-inl.h | 19 +-
deps/v8/src/objects.cc | 21 +-
deps/v8/src/objects.h | 28 +-
deps/v8/src/parser.cc | 65 +-
deps/v8/src/parser.h | 3 +-
deps/v8/src/powers_ten.h | 2461 +
deps/v8/src/regexp-delay.js | 59 +-
deps/v8/src/runtime.cc | 478 +-
deps/v8/src/runtime.h | 8 +-
deps/v8/src/scopes.h | 1 -
deps/v8/src/string.js | 124 +-
deps/v8/src/stub-cache.cc | 8 -
deps/v8/src/stub-cache.h | 25 +-
deps/v8/src/top.cc | 48 +-
deps/v8/src/top.h | 5 -
deps/v8/src/utils.h | 40 +-
deps/v8/src/v8-counters.h | 1 -
deps/v8/src/version.cc | 2 +-
deps/v8/src/virtual-frame-inl.h | 6 +
deps/v8/src/x64/codegen-x64.cc | 8 +-
deps/v8/src/x64/codegen-x64.h | 13 +-
deps/v8/src/x64/full-codegen-x64.cc | 5 +-
deps/v8/src/x64/macro-assembler-x64.cc | 12 +-
deps/v8/src/x64/stub-cache-x64.cc | 131 +-
deps/v8/src/x64/virtual-frame-x64.h | 2 +
deps/v8/test/cctest/SConscript | 4 +
deps/v8/test/cctest/gay_shortest.cc | 100048 +++++++++++++++
deps/v8/test/cctest/gay_shortest.h | 44 +
deps/v8/test/cctest/test-diy_fp.cc | 67 +
deps/v8/test/cctest/test-double.cc | 204 +
deps/v8/test/cctest/test-grisu3.cc | 116 +
deps/v8/test/cctest/test-log-stack-tracer.cc | 8 +-
deps/v8/test/cctest/test-log.cc | 6 -
deps/v8/test/cctest/test-serialize.cc | 1 -
deps/v8/test/mjsunit/abs.js | 48 +
deps/v8/test/mjsunit/array-pop.js | 61 +
deps/v8/test/mjsunit/array-push.js | 68 +
deps/v8/test/mjsunit/fuzz-natives.js | 14 +-
deps/v8/test/mjsunit/regexp-compile.js | 42 +
deps/v8/test/mjsunit/regress/regress-641.js | 35 +
deps/v8/test/mjsunit/regress/regress-643.js | 37 +
.../mjsunit/regress/regress-crbug-37853.js | 34 +
deps/v8/tools/generate-ten-powers.scm | 286 +
deps/v8/tools/gyp/v8.gyp | 6 +
104 files changed, 107895 insertions(+), 1104 deletions(-)
create mode 100644 deps/v8/src/cached_powers.h
create mode 100644 deps/v8/src/diy_fp.h
create mode 100644 deps/v8/src/double.h
create mode 100644 deps/v8/src/grisu3.cc
create mode 100644 deps/v8/src/grisu3.h
create mode 100644 deps/v8/src/powers_ten.h
create mode 100644 deps/v8/test/cctest/gay_shortest.cc
create mode 100644 deps/v8/test/cctest/gay_shortest.h
create mode 100644 deps/v8/test/cctest/test-diy_fp.cc
create mode 100644 deps/v8/test/cctest/test-double.cc
create mode 100644 deps/v8/test/cctest/test-grisu3.cc
create mode 100644 deps/v8/test/mjsunit/abs.js
create mode 100644 deps/v8/test/mjsunit/array-pop.js
create mode 100644 deps/v8/test/mjsunit/array-push.js
create mode 100644 deps/v8/test/mjsunit/regexp-compile.js
create mode 100644 deps/v8/test/mjsunit/regress/regress-641.js
create mode 100644 deps/v8/test/mjsunit/regress/regress-643.js
create mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-37853.js
create mode 100644 deps/v8/tools/generate-ten-powers.scm
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 4363b19995..d1c8425a5f 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,10 @@
+2010-03-10: Version 2.1.4
+
+ Fixed code cache lookup for keyed IC's (issue http://crbug.com/37853).
+
+ Performance improvements on all platforms.
+
+
2010-03-10: Version 2.1.3
Added API method for context-disposal notifications.
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 882eeddf15..bed86cabd0 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -2421,6 +2421,14 @@ class V8EXPORT V8 {
*/
static int GetLogLines(int from_pos, char* dest_buf, int max_size);
+ /**
+ * The minimum allowed size for a log lines buffer. If the size of
+ * the buffer given will not be enough to hold a line of the maximum
+ * length, an attempt to find a log line end in GetLogLines will
+ * fail, and an empty result will be returned.
+ */
+ static const int kMinimumSizeForLogLinesBuffer = 2048;
+
/**
* Retrieve the V8 thread id of the calling thread.
*
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index d61da3e078..44b6b5b4f5 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -63,6 +63,7 @@ SOURCES = {
full-codegen.cc
func-name-inferrer.cc
global-handles.cc
+ grisu3.cc
handles.cc
hashmap.cc
heap-profiler.cc
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 93fce79bdf..af2e7ad243 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -3580,6 +3580,7 @@ int V8::GetActiveProfilerModules() {
int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
+ ASSERT(max_size >= kMinimumSizeForLogLinesBuffer);
return i::Logger::GetLogLines(from_pos, dest_buf, max_size);
#endif
return 0;
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 9e59582593..3bf3fb8ccf 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -2709,18 +2709,20 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Comment cmnt(masm_, "[ ObjectLiteral");
// Load the function of this activation.
- __ ldr(r2, frame_->Function());
+ __ ldr(r3, frame_->Function());
// Literal array.
- __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
+ __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
// Literal index.
- __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
+ __ mov(r2, Operand(Smi::FromInt(node->literal_index())));
// Constant properties.
- __ mov(r0, Operand(node->constant_properties()));
- frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
+ __ mov(r1, Operand(node->constant_properties()));
+ // Should the object literal have fast elements?
+ __ mov(r0, Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
+ frame_->EmitPushMultiple(4, r3.bit() | r2.bit() | r1.bit() | r0.bit());
if (node->depth() > 1) {
- frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
- frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+ frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
}
frame_->EmitPush(r0); // save the result
for (int i = 0; i < node->properties()->length(); i++) {
@@ -3597,7 +3599,7 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList* args) {
}
-void CodeGenerator::GenerateArgumentsAccess(ZoneList* args) {
+void CodeGenerator::GenerateArguments(ZoneList* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index bea98b6693..9ade70a048 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -197,6 +197,10 @@ class CodeGenerator: public AstVisitor {
static const int kUnknownIntValue = -1;
+ // If the name is an inline runtime function call return the number of
+ // expected arguments. Otherwise return -1.
+ static int InlineRuntimeCallArgumentsCount(Handle name);
+
private:
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@@ -326,6 +330,7 @@ class CodeGenerator: public AstVisitor {
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList*);
const char* name;
+ int nargs;
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle name);
@@ -360,7 +365,7 @@ class CodeGenerator: public AstVisitor {
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList* args);
- void GenerateArgumentsAccess(ZoneList* args);
+ void GenerateArguments(ZoneList* args);
// Support for accessing the class and value fields of an object.
void GenerateClassOf(ZoneList* args);
@@ -396,14 +401,10 @@ class CodeGenerator: public AstVisitor {
// Fast support for number to string.
void GenerateNumberToString(ZoneList* args);
- // Fast support for Math.pow().
+ // Fast call to math functions.
void GenerateMathPow(ZoneList* args);
-
- // Fast call to sine function.
void GenerateMathSin(ZoneList* args);
void GenerateMathCos(ZoneList* args);
-
- // Fast support for Math.pow().
void GenerateMathSqrt(ZoneList* args);
// Simple condition analysis.
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 230818f5d1..a70cf44f80 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -783,15 +783,16 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
- __ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r0, Operand(expr->constant_properties()));
- __ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+ __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
+ __ mov(r1, Operand(expr->constant_properties()));
+ __ mov(r0, Operand(Smi::FromInt(expr->fast_elements() ? 1 : 0)));
+ __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit() | r0.bit());
if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+ __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
}
// If result_saved is true the result is on top of the stack. If
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 36bebdfeb4..bc779eb8de 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -280,9 +280,9 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
- mov(object, Operand(bit_cast(kZapValue)));
- mov(offset, Operand(bit_cast(kZapValue)));
- mov(scratch, Operand(bit_cast(kZapValue)));
+ mov(object, Operand(BitCast(kZapValue)));
+ mov(offset, Operand(BitCast(kZapValue)));
+ mov(scratch, Operand(BitCast(kZapValue)));
}
}
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 5d5b2a5d9a..abf2f643cd 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -815,6 +815,104 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
}
+Object* CallStubCompiler::CompileArrayPushCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // TODO(639): faster implementation.
+ ASSERT(check == RECEIVER_MAP_CHECK);
+
+ Label miss;
+
+ // Get the receiver from the stack
+ const int argc = arguments().immediate();
+ __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
+
+ if (object->IsGlobalObject()) {
+ __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ __ str(r3, MemOperand(sp, argc * kPointerSize));
+ }
+
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
+ argc + 1,
+ 1);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ String* function_name = NULL;
+ if (function->shared()->name()->IsString()) {
+ function_name = String::cast(function->shared()->name());
+ }
+ return GetCode(CONSTANT_FUNCTION, function_name);
+}
+
+
+Object* CallStubCompiler::CompileArrayPopCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+
+ // TODO(642): faster implementation.
+ ASSERT(check == RECEIVER_MAP_CHECK);
+
+ Label miss;
+
+ // Get the receiver from the stack
+ const int argc = arguments().immediate();
+ __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the maps haven't changed.
+ CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
+
+ if (object->IsGlobalObject()) {
+ __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ __ str(r3, MemOperand(sp, argc * kPointerSize));
+ }
+
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
+ argc + 1,
+ 1);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ String* function_name = NULL;
+ if (function->shared()->name()->IsString()) {
+ function_name = String::cast(function->shared()->name());
+ }
+ return GetCode(CONSTANT_FUNCTION, function_name);
+}
+
+
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@@ -824,6 +922,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// -- r2 : name
// -- lr : return address
// -----------------------------------
+ SharedFunctionInfo* function_info = function->shared();
+ if (function_info->HasCustomCallGenerator()) {
+    CustomCallGenerator generator =
+        ToCData<CustomCallGenerator>(function_info->function_data());
+ return generator(this, object, holder, function, name, check);
+ }
+
Label miss;
// Get the receiver from the stack
@@ -916,18 +1021,6 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
break;
}
- case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
- CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss);
- // Make sure object->HasFastElements().
- // Get the elements array of the object.
- __ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
- // Check that the object is in fast mode (not dictionary).
- __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r0, ip);
- __ b(ne, &miss);
- break;
-
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index 6eb08119ed..7375b31833 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -364,6 +364,8 @@ class VirtualFrame : public ZoneObject {
// the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
inline void Nip(int num_dropped);
+ inline void SetTypeForLocalAt(int index, NumberInfo info);
+
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index e33c2809ce..95d4ada164 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -1088,15 +1088,6 @@ function ArrayIsArray(obj) {
return IS_ARRAY(obj);
}
-// -------------------------------------------------------------------
-
-
-function UpdateFunctionLengths(lengths) {
- for (var key in lengths) {
- %FunctionSetLength(this[key], lengths[key]);
- }
-}
-
// -------------------------------------------------------------------
function SetupArray() {
@@ -1109,47 +1100,47 @@ function SetupArray() {
"isArray", ArrayIsArray
));
+ var specialFunctions = %SpecialArrayFunctions({});
+
+ function getFunction(name, jsBuiltin, len) {
+ var f = jsBuiltin;
+ if (specialFunctions.hasOwnProperty(name)) {
+ f = specialFunctions[name];
+ }
+ if (!IS_UNDEFINED(len)) {
+ %FunctionSetLength(f, len);
+ }
+ return f;
+ }
+
// Setup non-enumerable functions of the Array.prototype object and
// set their names.
+ // Manipulate the length of some of the functions to meet
+ // expectations set by ECMA-262 or Mozilla.
InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
- "toString", ArrayToString,
- "toLocaleString", ArrayToLocaleString,
- "join", ArrayJoin,
- "pop", ArrayPop,
- "push", ArrayPush,
- "concat", ArrayConcat,
- "reverse", ArrayReverse,
- "shift", ArrayShift,
- "unshift", ArrayUnshift,
- "slice", ArraySlice,
- "splice", ArraySplice,
- "sort", ArraySort,
- "filter", ArrayFilter,
- "forEach", ArrayForEach,
- "some", ArraySome,
- "every", ArrayEvery,
- "map", ArrayMap,
- "indexOf", ArrayIndexOf,
- "lastIndexOf", ArrayLastIndexOf,
- "reduce", ArrayReduce,
- "reduceRight", ArrayReduceRight
+ "toString", getFunction("toString", ArrayToString),
+ "toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
+ "join", getFunction("join", ArrayJoin),
+ "pop", getFunction("pop", ArrayPop),
+ "push", getFunction("push", ArrayPush, 1),
+ "concat", getFunction("concat", ArrayConcat),
+ "reverse", getFunction("reverse", ArrayReverse),
+ "shift", getFunction("shift", ArrayShift),
+ "unshift", getFunction("unshift", ArrayUnshift, 1),
+ "slice", getFunction("slice", ArraySlice, 2),
+ "splice", getFunction("splice", ArraySplice, 2),
+ "sort", getFunction("sort", ArraySort),
+ "filter", getFunction("filter", ArrayFilter, 1),
+ "forEach", getFunction("forEach", ArrayForEach, 1),
+ "some", getFunction("some", ArraySome, 1),
+ "every", getFunction("every", ArrayEvery, 1),
+ "map", getFunction("map", ArrayMap, 1),
+ "indexOf", getFunction("indexOf", ArrayIndexOf, 1),
+ "lastIndexOf", getFunction("lastIndexOf", ArrayLastIndexOf, 1),
+ "reduce", getFunction("reduce", ArrayReduce, 1),
+ "reduceRight", getFunction("reduceRight", ArrayReduceRight, 1)
));
- // Manipulate the length of some of the functions to meet
- // expectations set by ECMA-262 or Mozilla.
- UpdateFunctionLengths({
- ArrayFilter: 1,
- ArrayForEach: 1,
- ArraySome: 1,
- ArrayEvery: 1,
- ArrayMap: 1,
- ArrayIndexOf: 1,
- ArrayLastIndexOf: 1,
- ArrayPush: 1,
- ArrayReduce: 1,
- ArrayReduceRight: 1
- });
-
%FinishArrayPrototypeSetup($Array.prototype);
}
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 062a5c67ad..339cfa140e 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -58,13 +58,27 @@ AST_NODE_LIST(DECL_ACCEPT)
// ----------------------------------------------------------------------------
// Implementation of other node functionality.
+Assignment* ExpressionStatement::StatementAsSimpleAssignment() {
+ return (expression()->AsAssignment() != NULL &&
+ !expression()->AsAssignment()->is_compound())
+ ? expression()->AsAssignment()
+ : NULL;
+}
+
+
+CountOperation* ExpressionStatement::StatementAsCountOperation() {
+ return expression()->AsCountOperation();
+}
+
+
VariableProxy::VariableProxy(Handle name,
bool is_this,
bool inside_with)
: name_(name),
var_(NULL),
is_this_(is_this),
- inside_with_(inside_with) {
+ inside_with_(inside_with),
+ is_trivial_(false) {
// names must be canonicalized for fast equality checks
ASSERT(name->IsSymbol());
}
@@ -484,5 +498,96 @@ RegExpAlternative::RegExpAlternative(ZoneList* nodes)
}
}
+// IsPrimitive implementation. IsPrimitive is true if the value of an
+// expression is known at compile-time to be any JS type other than Object
+// (e.g., it is Undefined, Null, Boolean, String, or Number).
+
+// The following expression types are never primitive because they express
+// Object values.
+bool FunctionLiteral::IsPrimitive() { return false; }
+bool FunctionBoilerplateLiteral::IsPrimitive() { return false; }
+bool RegExpLiteral::IsPrimitive() { return false; }
+bool ObjectLiteral::IsPrimitive() { return false; }
+bool ArrayLiteral::IsPrimitive() { return false; }
+bool CatchExtensionObject::IsPrimitive() { return false; }
+bool CallNew::IsPrimitive() { return false; }
+bool ThisFunction::IsPrimitive() { return false; }
+
+
+// The following expression types are not always primitive because we do not
+// have enough information to conclude that they are.
+bool VariableProxy::IsPrimitive() { return false; }
+bool Property::IsPrimitive() { return false; }
+bool Call::IsPrimitive() { return false; }
+bool CallRuntime::IsPrimitive() { return false; }
+
+
+// The value of a conditional is the value of one of the alternatives. It's
+// always primitive if both alternatives are always primitive.
+bool Conditional::IsPrimitive() {
+ return then_expression()->IsPrimitive() && else_expression()->IsPrimitive();
+}
+
+
+// A literal is primitive when it is not a JSObject.
+bool Literal::IsPrimitive() { return !handle()->IsJSObject(); }
+
+
+// The value of an assignment is the value of its right-hand side.
+bool Assignment::IsPrimitive() {
+ switch (op()) {
+ case Token::INIT_VAR:
+ case Token::INIT_CONST:
+ case Token::ASSIGN:
+ return value()->IsPrimitive();
+
+ default:
+ // {|=, ^=, &=, <<=, >>=, >>>=, +=, -=, *=, /=, %=}
+ // Arithmetic operations are always primitive. They express Numbers
+ // with the exception of +, which expresses a Number or a String.
+ return true;
+ }
+}
+
+
+// Throw does not express a value, so it's trivially always primitive.
+bool Throw::IsPrimitive() { return true; }
+
+
+// Unary operations always express primitive values. delete and ! express
+// Booleans, void Undefined, typeof String, +, -, and ~ Numbers.
+bool UnaryOperation::IsPrimitive() { return true; }
+
+
+// Count operations (pre- and post-fix increment and decrement) always
+// express primitive values (Numbers). See ECMA-262-3, 11.3.1, 11.3.2,
+// 11.4.4, and 11.4.5.
+bool CountOperation::IsPrimitive() { return true; }
+
+
+// Binary operations depend on the operator.
+bool BinaryOperation::IsPrimitive() {
+ switch (op()) {
+ case Token::COMMA:
+ // Value is the value of the right subexpression.
+ return right()->IsPrimitive();
+
+ case Token::OR:
+ case Token::AND:
+      // Value is the value of one of the subexpressions.
+ return left()->IsPrimitive() && right()->IsPrimitive();
+
+ default:
+ // {|, ^, &, <<, >>, >>>, +, -, *, /, %}
+ // Arithmetic operations are always primitive. They express Numbers
+ // with the exception of +, which expresses a Number or a String.
+ return true;
+ }
+}
+
+
+// Compare operations always express Boolean values.
+bool CompareOperation::IsPrimitive() { return true; }
+
} } // namespace v8::internal
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 13502dc2a8..0d654b19c8 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -137,6 +137,7 @@ class AstNode: public ZoneObject {
virtual BreakableStatement* AsBreakableStatement() { return NULL; }
virtual IterationStatement* AsIterationStatement() { return NULL; }
virtual UnaryOperation* AsUnaryOperation() { return NULL; }
+ virtual CountOperation* AsCountOperation() { return NULL; }
virtual BinaryOperation* AsBinaryOperation() { return NULL; }
virtual Assignment* AsAssignment() { return NULL; }
virtual FunctionLiteral* AsFunctionLiteral() { return NULL; }
@@ -161,6 +162,9 @@ class Statement: public AstNode {
virtual Statement* AsStatement() { return this; }
virtual ReturnStatement* AsReturnStatement() { return NULL; }
+ virtual Assignment* StatementAsSimpleAssignment() { return NULL; }
+ virtual CountOperation* StatementAsCountOperation() { return NULL; }
+
bool IsEmpty() { return AsEmptyStatement() != NULL; }
void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
@@ -200,6 +204,8 @@ class Expression: public AstNode {
virtual bool IsValidLeftHandSide() { return false; }
+ virtual Variable* AssignedVar() { return NULL; }
+
// Symbols that cannot be parsed as array indices are considered property
// names. We do not treat symbols that can be array indexes as property
// names because [] for string objects is handled only by keyed ICs.
@@ -214,6 +220,10 @@ class Expression: public AstNode {
// evaluate out of order.
virtual bool IsTrivial() { return false; }
+ // True if the expression always has one of the non-Object JS types
+ // (Undefined, Null, Boolean, String, or Number).
+ virtual bool IsPrimitive() = 0;
+
// Mark the expression as being compiled as an expression
// statement. This is used to transform postfix increments to
// (faster) prefix increments.
@@ -274,6 +284,12 @@ class ValidLeftHandSideSentinel: public Expression {
virtual bool IsValidLeftHandSide() { return true; }
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
static ValidLeftHandSideSentinel* instance() { return &instance_; }
+
+ virtual bool IsPrimitive() {
+ UNREACHABLE();
+ return false;
+ }
+
private:
static ValidLeftHandSideSentinel instance_;
};
@@ -321,6 +337,16 @@ class Block: public BreakableStatement {
virtual void Accept(AstVisitor* v);
+ virtual Assignment* StatementAsSimpleAssignment() {
+ if (statements_.length() != 1) return NULL;
+ return statements_[0]->StatementAsSimpleAssignment();
+ }
+
+ virtual CountOperation* StatementAsCountOperation() {
+ if (statements_.length() != 1) return NULL;
+ return statements_[0]->StatementAsCountOperation();
+ }
+
void AddStatement(Statement* statement) { statements_.Add(statement); }
ZoneList* statements() { return &statements_; }
@@ -442,8 +468,8 @@ class ForStatement: public IterationStatement {
init_(NULL),
cond_(NULL),
next_(NULL),
- may_have_function_literal_(true) {
- }
+ may_have_function_literal_(true),
+ loop_variable_(NULL) {}
void Initialize(Statement* init,
Expression* cond,
@@ -464,12 +490,17 @@ class ForStatement: public IterationStatement {
return may_have_function_literal_;
}
+ bool is_fast_smi_loop() { return loop_variable_ != NULL; }
+ Variable* loop_variable() { return loop_variable_; }
+ void set_loop_variable(Variable* var) { loop_variable_ = var; }
+
private:
Statement* init_;
Expression* cond_;
Statement* next_;
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
+ Variable* loop_variable_;
friend class AstOptimizer;
};
@@ -507,6 +538,9 @@ class ExpressionStatement: public Statement {
// Type testing & conversion.
virtual ExpressionStatement* AsExpressionStatement() { return this; }
+ virtual Assignment* StatementAsSimpleAssignment();
+ virtual CountOperation* StatementAsCountOperation();
+
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() { return expression_; }
@@ -774,6 +808,7 @@ class Literal: public Expression {
virtual bool IsLeaf() { return true; }
virtual bool IsTrivial() { return true; }
+ virtual bool IsPrimitive();
// Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
@@ -849,24 +884,31 @@ class ObjectLiteral: public MaterializedLiteral {
ZoneList* properties,
int literal_index,
bool is_simple,
+ bool fast_elements,
int depth)
: MaterializedLiteral(literal_index, is_simple, depth),
constant_properties_(constant_properties),
- properties_(properties) {}
+ properties_(properties),
+ fast_elements_(fast_elements) {}
virtual ObjectLiteral* AsObjectLiteral() { return this; }
virtual void Accept(AstVisitor* v);
virtual bool IsLeaf() { return properties()->is_empty(); }
+ virtual bool IsPrimitive();
+
Handle constant_properties() const {
return constant_properties_;
}
ZoneList* properties() const { return properties_; }
+ bool fast_elements() const { return fast_elements_; }
+
private:
Handle constant_properties_;
ZoneList* properties_;
+ bool fast_elements_;
};
@@ -884,6 +926,8 @@ class RegExpLiteral: public MaterializedLiteral {
virtual bool IsLeaf() { return true; }
+ virtual bool IsPrimitive();
+
Handle pattern() const { return pattern_; }
Handle flags() const { return flags_; }
@@ -910,6 +954,8 @@ class ArrayLiteral: public MaterializedLiteral {
virtual bool IsLeaf() { return values()->is_empty(); }
+ virtual bool IsPrimitive();
+
Handle constant_elements() const { return constant_elements_; }
ZoneList* values() const { return values_; }
@@ -930,6 +976,8 @@ class CatchExtensionObject: public Expression {
virtual void Accept(AstVisitor* v);
+ virtual bool IsPrimitive();
+
Literal* key() const { return key_; }
VariableProxy* value() const { return value_; }
@@ -964,7 +1012,9 @@ class VariableProxy: public Expression {
// Reading from a mutable variable is a side effect, but 'this' is
// immutable.
- virtual bool IsTrivial() { return is_this(); }
+ virtual bool IsTrivial() { return is_trivial_; }
+
+ virtual bool IsPrimitive();
bool IsVariable(Handle n) {
return !is_this() && name().is_identical_to(n);
@@ -979,6 +1029,8 @@ class VariableProxy: public Expression {
Variable* var() const { return var_; }
bool is_this() const { return is_this_; }
bool inside_with() const { return inside_with_; }
+ bool is_trivial() { return is_trivial_; }
+ void set_is_trivial(bool b) { is_trivial_ = b; }
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@@ -988,6 +1040,7 @@ class VariableProxy: public Expression {
Variable* var_; // resolved variable, or NULL
bool is_this_;
bool inside_with_;
+ bool is_trivial_;
VariableProxy(Handle name, bool is_this, bool inside_with);
explicit VariableProxy(bool is_this);
@@ -1004,6 +1057,11 @@ class VariableProxySentinel: public VariableProxy {
return &identifier_proxy_;
}
+ virtual bool IsPrimitive() {
+ UNREACHABLE();
+ return false;
+ }
+
private:
explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { }
static VariableProxySentinel this_proxy_;
@@ -1047,6 +1105,11 @@ class Slot: public Expression {
virtual bool IsLeaf() { return true; }
+ virtual bool IsPrimitive() {
+ UNREACHABLE();
+ return false;
+ }
+
bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
// Accessors
@@ -1079,6 +1142,8 @@ class Property: public Expression {
virtual bool IsValidLeftHandSide() { return true; }
+ virtual bool IsPrimitive();
+
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
int position() const { return pos_; }
@@ -1109,6 +1174,8 @@ class Call: public Expression {
// Type testing and conversion.
virtual Call* AsCall() { return this; }
+ virtual bool IsPrimitive();
+
Expression* expression() const { return expression_; }
ZoneList* arguments() const { return arguments_; }
int position() { return pos_; }
@@ -1131,6 +1198,8 @@ class CallNew: public Expression {
virtual void Accept(AstVisitor* v);
+ virtual bool IsPrimitive();
+
Expression* expression() const { return expression_; }
ZoneList* arguments() const { return arguments_; }
int position() { return pos_; }
@@ -1155,6 +1224,8 @@ class CallRuntime: public Expression {
virtual void Accept(AstVisitor* v);
+ virtual bool IsPrimitive();
+
Handle name() const { return name_; }
Runtime::Function* function() const { return function_; }
ZoneList* arguments() const { return arguments_; }
@@ -1179,6 +1250,8 @@ class UnaryOperation: public Expression {
// Type testing & conversion
virtual UnaryOperation* AsUnaryOperation() { return this; }
+ virtual bool IsPrimitive();
+
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
@@ -1200,6 +1273,8 @@ class BinaryOperation: public Expression {
// Type testing & conversion
virtual BinaryOperation* AsBinaryOperation() { return this; }
+ virtual bool IsPrimitive();
+
// True iff the result can be safely overwritten (to avoid allocation).
// False for operations that can return one of their operands.
bool ResultOverwriteAllowed() {
@@ -1246,6 +1321,14 @@ class CountOperation: public Expression {
virtual void Accept(AstVisitor* v);
+ virtual CountOperation* AsCountOperation() { return this; }
+
+ virtual Variable* AssignedVar() {
+ return expression()->AsVariableProxy()->AsVariable();
+ }
+
+ virtual bool IsPrimitive();
+
bool is_prefix() const { return is_prefix_; }
bool is_postfix() const { return !is_prefix_; }
Token::Value op() const { return op_; }
@@ -1272,6 +1355,8 @@ class CompareOperation: public Expression {
virtual void Accept(AstVisitor* v);
+ virtual bool IsPrimitive();
+
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
Expression* right() const { return right_; }
@@ -1302,6 +1387,8 @@ class Conditional: public Expression {
virtual void Accept(AstVisitor* v);
+ virtual bool IsPrimitive();
+
Expression* condition() const { return condition_; }
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
@@ -1324,6 +1411,14 @@ class Assignment: public Expression {
virtual void Accept(AstVisitor* v);
virtual Assignment* AsAssignment() { return this; }
+ virtual bool IsPrimitive();
+
+ Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
+
+ virtual Variable* AssignedVar() {
+ return target()->AsVariableProxy()->AsVariable();
+ }
+
Token::Value binary_op() const;
Token::Value op() const { return op_; }
@@ -1358,6 +1453,9 @@ class Throw: public Expression {
: exception_(exception), pos_(pos) {}
virtual void Accept(AstVisitor* v);
+
+ virtual bool IsPrimitive();
+
Expression* exception() const { return exception_; }
int position() const { return pos_; }
@@ -1407,6 +1505,8 @@ class FunctionLiteral: public Expression {
virtual bool IsLeaf() { return true; }
+ virtual bool IsPrimitive();
+
Handle name() const { return name_; }
Scope* scope() const { return scope_; }
ZoneList* body() const { return body_; }
@@ -1477,6 +1577,8 @@ class FunctionBoilerplateLiteral: public Expression {
virtual void Accept(AstVisitor* v);
+ virtual bool IsPrimitive();
+
private:
Handle boilerplate_;
};
@@ -1486,6 +1588,7 @@ class ThisFunction: public Expression {
public:
virtual void Accept(AstVisitor* v);
virtual bool IsLeaf() { return true; }
+ virtual bool IsPrimitive();
};
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 12efbc17ab..8df37d2e77 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -55,7 +55,7 @@ class SourceCodeCache BASE_EMBEDDED {
}
void Iterate(ObjectVisitor* v) {
- v->VisitPointer(bit_cast(&cache_));
+ v->VisitPointer(BitCast(&cache_));
}
@@ -245,12 +245,6 @@ class Genesis BASE_EMBEDDED {
bool make_prototype_enumerable = false);
void MakeFunctionInstancePrototypeWritable();
- void AddSpecialFunction(Handle prototype,
- const char* name,
- Handle code);
-
- void BuildSpecialFunctionTable();
-
static bool CompileBuiltin(int index);
static bool CompileNative(Vector name, Handle source);
static bool CompileScriptCached(Vector name,
@@ -777,8 +771,6 @@ void Genesis::CreateRoots(v8::Handle global_template,
delegate->shared()->DontAdaptArguments();
}
- global_context()->set_special_function_table(Heap::empty_fixed_array());
-
// Initialize the out of memory slot.
global_context()->set_out_of_memory(Heap::false_value());
@@ -1457,65 +1449,6 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
}
-void Genesis::AddSpecialFunction(Handle prototype,
- const char* name,
- Handle code) {
- Handle key = Factory::LookupAsciiSymbol(name);
- Handle value = Handle(prototype->GetProperty(*key));
- if (value->IsJSFunction()) {
- Handle optimized = Factory::NewFunction(key,
- JS_OBJECT_TYPE,
- JSObject::kHeaderSize,
- code,
- false);
- optimized->shared()->DontAdaptArguments();
- int len = global_context()->special_function_table()->length();
- Handle new_array = Factory::NewFixedArray(len + 3);
- for (int index = 0; index < len; index++) {
- new_array->set(index,
- global_context()->special_function_table()->get(index));
- }
- new_array->set(len+0, *prototype);
- new_array->set(len+1, *value);
- new_array->set(len+2, *optimized);
- global_context()->set_special_function_table(*new_array);
- }
-}
-
-
-void Genesis::BuildSpecialFunctionTable() {
- HandleScope scope;
- Handle global = Handle(global_context()->global());
- // Add special versions for some Array.prototype functions.
- Handle function =
- Handle(
- JSFunction::cast(global->GetProperty(Heap::Array_symbol())));
- Handle visible_prototype =
- Handle(JSObject::cast(function->prototype()));
- // Remember to put those specializations on the hidden prototype if present.
- Handle special_prototype;
- Handle superproto(visible_prototype->GetPrototype());
- if (superproto->IsJSObject() &&
- JSObject::cast(*superproto)->map()->is_hidden_prototype()) {
- special_prototype = Handle::cast(superproto);
- } else {
- special_prototype = visible_prototype;
- }
- AddSpecialFunction(special_prototype, "pop",
- Handle(Builtins::builtin(Builtins::ArrayPop)));
- AddSpecialFunction(special_prototype, "push",
- Handle(Builtins::builtin(Builtins::ArrayPush)));
- AddSpecialFunction(special_prototype, "shift",
- Handle(Builtins::builtin(Builtins::ArrayShift)));
- AddSpecialFunction(special_prototype, "unshift",
- Handle(Builtins::builtin(Builtins::ArrayUnshift)));
- AddSpecialFunction(special_prototype, "slice",
- Handle(Builtins::builtin(Builtins::ArraySlice)));
- AddSpecialFunction(special_prototype, "splice",
- Handle(Builtins::builtin(Builtins::ArraySplice)));
-}
-
-
Genesis::Genesis(Handle global_object,
v8::Handle global_template,
v8::ExtensionConfiguration* extensions) {
@@ -1539,7 +1472,6 @@ Genesis::Genesis(Handle global_object,
if (!InstallNatives()) return;
MakeFunctionInstancePrototypeWritable();
- BuildSpecialFunctionTable();
if (!ConfigureGlobalObjects(global_template)) return;
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index a8ba818c41..e59dbcf29d 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -319,6 +319,24 @@ static bool ArrayPrototypeHasNoElements() {
}
+static bool IsJSArrayWithFastElements(Object* receiver,
+ FixedArray** elements) {
+ if (!receiver->IsJSArray()) {
+ return false;
+ }
+
+ JSArray* array = JSArray::cast(receiver);
+
+ HeapObject* elms = HeapObject::cast(array->elements());
+ if (elms->map() != Heap::fixed_array_map()) {
+ return false;
+ }
+
+ *elements = FixedArray::cast(elms);
+ return true;
+}
+
+
static Object* CallJsBuiltin(const char* name,
BuiltinArguments args) {
HandleScope handleScope;
@@ -331,7 +349,7 @@ static Object* CallJsBuiltin(const char* name,
Vector argv(Vector::New(args.length() - 1));
int n_args = args.length() - 1;
for (int i = 0; i < n_args; i++) {
- argv[i] = &args[i + 1];
+ argv[i] = args.at(i + 1).location();
}
bool pending_exception = false;
Handle result = Execution::Call(function,
@@ -346,8 +364,12 @@ static Object* CallJsBuiltin(const char* name,
BUILTIN(ArrayPush) {
- JSArray* array = JSArray::cast(*args.receiver());
- ASSERT(array->HasFastElements());
+ Object* receiver = *args.receiver();
+ FixedArray* elms = NULL;
+ if (!IsJSArrayWithFastElements(receiver, &elms)) {
+ return CallJsBuiltin("ArrayPush", args);
+ }
+ JSArray* array = JSArray::cast(receiver);
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@@ -359,7 +381,6 @@ BUILTIN(ArrayPush) {
ASSERT(to_add <= (Smi::kMaxValue - len));
int new_length = len + to_add;
- FixedArray* elms = FixedArray::cast(array->elements());
if (new_length > elms->length()) {
// New backing storage is needed.
@@ -390,14 +411,17 @@ BUILTIN(ArrayPush) {
BUILTIN(ArrayPop) {
- JSArray* array = JSArray::cast(*args.receiver());
- ASSERT(array->HasFastElements());
+ Object* receiver = *args.receiver();
+ FixedArray* elms = NULL;
+ if (!IsJSArrayWithFastElements(receiver, &elms)) {
+ return CallJsBuiltin("ArrayPop", args);
+ }
+ JSArray* array = JSArray::cast(receiver);
int len = Smi::cast(array->length())->value();
if (len == 0) return Heap::undefined_value();
// Get top element
- FixedArray* elms = FixedArray::cast(array->elements());
Object* top = elms->get(len - 1);
// Set the length.
@@ -420,18 +444,18 @@ BUILTIN(ArrayPop) {
BUILTIN(ArrayShift) {
- if (!ArrayPrototypeHasNoElements()) {
+ Object* receiver = *args.receiver();
+ FixedArray* elms = NULL;
+ if (!IsJSArrayWithFastElements(receiver, &elms)
+ || !ArrayPrototypeHasNoElements()) {
return CallJsBuiltin("ArrayShift", args);
}
-
- JSArray* array = JSArray::cast(*args.receiver());
+ JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return Heap::undefined_value();
- FixedArray* elms = FixedArray::cast(array->elements());
-
// Get first element
Object* first = elms->get(0);
if (first->IsTheHole()) {
@@ -451,26 +475,22 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
- if (!ArrayPrototypeHasNoElements()) {
+ Object* receiver = *args.receiver();
+ FixedArray* elms = NULL;
+ if (!IsJSArrayWithFastElements(receiver, &elms)
+ || !ArrayPrototypeHasNoElements()) {
return CallJsBuiltin("ArrayUnshift", args);
}
-
- JSArray* array = JSArray::cast(*args.receiver());
+ JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
- // Note that we cannot quit early if to_add == 0 as
- // values should be lifted from prototype into
- // the array.
-
int new_length = len + to_add;
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
- FixedArray* elms = FixedArray::cast(array->elements());
-
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
@@ -503,11 +523,13 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
- if (!ArrayPrototypeHasNoElements()) {
+ Object* receiver = *args.receiver();
+ FixedArray* elms = NULL;
+ if (!IsJSArrayWithFastElements(receiver, &elms)
+ || !ArrayPrototypeHasNoElements()) {
return CallJsBuiltin("ArraySlice", args);
}
-
- JSArray* array = JSArray::cast(*args.receiver());
+ JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
@@ -558,8 +580,6 @@ BUILTIN(ArraySlice) {
if (result->IsFailure()) return result;
FixedArray* result_elms = FixedArray::cast(result);
- FixedArray* elms = FixedArray::cast(array->elements());
-
AssertNoAllocation no_gc;
CopyElements(&no_gc, result_elms, 0, elms, k, result_len);
@@ -573,11 +593,13 @@ BUILTIN(ArraySlice) {
BUILTIN(ArraySplice) {
- if (!ArrayPrototypeHasNoElements()) {
+ Object* receiver = *args.receiver();
+ FixedArray* elms = NULL;
+ if (!IsJSArrayWithFastElements(receiver, &elms)
+ || !ArrayPrototypeHasNoElements()) {
return CallJsBuiltin("ArraySplice", args);
}
-
- JSArray* array = JSArray::cast(*args.receiver());
+ JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
@@ -618,8 +640,6 @@ BUILTIN(ArraySplice) {
}
int actual_delete_count = Min(Max(delete_count, 0), len - actual_start);
- FixedArray* elms = FixedArray::cast(array->elements());
-
JSArray* result_array = NULL;
if (actual_delete_count == 0) {
Object* result = AllocateEmptyJSArray();
@@ -766,20 +786,19 @@ static Object* HandleApiCallHelper(
HandleScope scope;
Handle function = args.called_function();
+ ASSERT(function->shared()->IsApiFunction());
+ FunctionTemplateInfo* fun_data = function->shared()->get_api_func_data();
if (is_construct) {
- Handle desc =
- Handle(
- FunctionTemplateInfo::cast(function->shared()->function_data()));
+ Handle desc(fun_data);
bool pending_exception = false;
Factory::ConfigureInstance(desc, Handle::cast(args.receiver()),
&pending_exception);
ASSERT(Top::has_pending_exception() == pending_exception);
if (pending_exception) return Failure::Exception();
+ fun_data = *desc;
}
- FunctionTemplateInfo* fun_data =
- FunctionTemplateInfo::cast(function->shared()->function_data());
Object* raw_holder = TypeCheck(args.length(), &args[0], fun_data);
if (raw_holder->IsNull()) {
@@ -850,8 +869,8 @@ BUILTIN(HandleApiCallConstruct) {
static void VerifyTypeCheck(Handle object,
Handle function) {
- FunctionTemplateInfo* info =
- FunctionTemplateInfo::cast(function->shared()->function_data());
+ ASSERT(function->shared()->IsApiFunction());
+ FunctionTemplateInfo* info = function->shared()->get_api_func_data();
if (info->signature()->IsUndefined()) return;
SignatureInfo* signature = SignatureInfo::cast(info->signature());
Object* receiver_type = signature->receiver();
@@ -935,9 +954,9 @@ static Object* HandleApiCallAsFunctionOrConstructor(
// used to create the called object.
ASSERT(obj->map()->has_instance_call_handler());
JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
- Object* template_info = constructor->shared()->function_data();
+ ASSERT(constructor->shared()->IsApiFunction());
Object* handler =
- FunctionTemplateInfo::cast(template_info)->instance_call_handler();
+ constructor->shared()->get_api_func_data()->instance_call_handler();
ASSERT(!handler->IsUndefined());
CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
Object* callback_obj = call_data->callback();
diff --git a/deps/v8/src/cached_powers.h b/deps/v8/src/cached_powers.h
new file mode 100644
index 0000000000..7c3d234eec
--- /dev/null
+++ b/deps/v8/src/cached_powers.h
@@ -0,0 +1,119 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CACHED_POWERS_H_
+#define V8_CACHED_POWERS_H_
+
+#include "diy_fp.h"
+
+namespace v8 {
+namespace internal {
+
+struct CachedPower {
+ uint64_t significand;
+ int16_t binary_exponent;
+ int16_t decimal_exponent;
+};
+
+// The following defines implement the interface between this file and the
+// generated 'powers_ten.h'.
+// GRISU_CACHE_NAME(1) contains all possible cached powers.
+// GRISU_CACHE_NAME(i) contains GRISU_CACHE_NAME(1) where only every 'i'th
+// element is kept. More formally GRISU_CACHE_NAME(i) contains the elements j*i
+// with 0 <= j < k with k such that j*k < the size of GRISU_CACHE_NAME(1).
+// The higher 'i' is the fewer elements we use.
+// Given that there are less elements, the exponent-distance between two
+// elements in the cache grows. The variable GRISU_CACHE_MAX_DISTANCE(i) stores
+// the maximum distance between two elements.
+#define GRISU_CACHE_STRUCT CachedPower
+#define GRISU_CACHE_NAME(i) kCachedPowers##i
+#define GRISU_CACHE_MAX_DISTANCE(i) kCachedPowersMaxDistance##i
+#define GRISU_CACHE_OFFSET kCachedPowerOffset
+#define GRISU_UINT64_C V8_2PART_UINT64_C
+// The following include imports the precompiled cached powers.
+#include "powers_ten.h" // NOLINT
+
+static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
+
+// We can't use a function since we reference variables depending on the 'i'.
+// This way the compiler is able to see at compile time that only one
+// cache-array variable is used and thus can remove all the others.
+#define COMPUTE_FOR_CACHE(i) \
+ if (!found && (gamma - alpha + 1 >= GRISU_CACHE_MAX_DISTANCE(i))) { \
+ int kQ = DiyFp::kSignificandSize; \
+ double k = ceiling((alpha - e + kQ - 1) * kD_1_LOG2_10); \
+ int index = (GRISU_CACHE_OFFSET + static_cast(k) - 1) / i + 1; \
+ cached_power = GRISU_CACHE_NAME(i)[index]; \
+ found = true; \
+ } \
+
+static void GetCachedPower(int e, int alpha, int gamma, int* mk, DiyFp* c_mk) {
+ // The following if statement should be optimized by the compiler so that only
+ // one array is referenced and the others are not included in the object file.
+ bool found = false;
+ CachedPower cached_power;
+ COMPUTE_FOR_CACHE(20);
+ COMPUTE_FOR_CACHE(19);
+ COMPUTE_FOR_CACHE(18);
+ COMPUTE_FOR_CACHE(17);
+ COMPUTE_FOR_CACHE(16);
+ COMPUTE_FOR_CACHE(15);
+ COMPUTE_FOR_CACHE(14);
+ COMPUTE_FOR_CACHE(13);
+ COMPUTE_FOR_CACHE(12);
+ COMPUTE_FOR_CACHE(11);
+ COMPUTE_FOR_CACHE(10);
+ COMPUTE_FOR_CACHE(9);
+ COMPUTE_FOR_CACHE(8);
+ COMPUTE_FOR_CACHE(7);
+ COMPUTE_FOR_CACHE(6);
+ COMPUTE_FOR_CACHE(5);
+ COMPUTE_FOR_CACHE(4);
+ COMPUTE_FOR_CACHE(3);
+ COMPUTE_FOR_CACHE(2);
+ COMPUTE_FOR_CACHE(1);
+ if (!found) {
+ UNIMPLEMENTED();
+ // Silence compiler warnings.
+ cached_power.significand = 0;
+ cached_power.binary_exponent = 0;
+ cached_power.decimal_exponent = 0;
+ }
+ *c_mk = DiyFp(cached_power.significand, cached_power.binary_exponent);
+ *mk = cached_power.decimal_exponent;
+ ASSERT((alpha <= c_mk->e() + e) && (c_mk->e() + e <= gamma));
+}
+#undef GRISU_REDUCTION
+#undef GRISU_CACHE_STRUCT
+#undef GRISU_CACHE_NAME
+#undef GRISU_CACHE_MAX_DISTANCE
+#undef GRISU_CACHE_OFFSET
+#undef GRISU_UINT64_C
+
+} } // namespace v8::internal
+
+#endif // V8_CACHED_POWERS_H_
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index eeb748b4a8..cdcd18ad22 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -80,6 +80,7 @@ static inline void CheckEqualsHelper(const char* file, int line,
}
}
+
// Helper function used by the CHECK_EQ function when given int64_t
// arguments. Should not be called directly.
static inline void CheckEqualsHelper(const char* file, int line,
@@ -202,6 +203,27 @@ static inline void CheckEqualsHelper(const char* file,
}
+static inline void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ double expected,
+ const char* value_source,
+ double value) {
+ // Force values to 64 bit memory to truncate 80 bit precision on IA32.
+ volatile double* exp = new double[1];
+ *exp = expected;
+ volatile double* val = new double[1];
+ *val = value;
+ if (*exp == *val) {
+ V8_Fatal(file, line,
+ "CHECK_NE(%s, %s) failed\n# Value: %f",
+ expected_source, value_source, *val);
+ }
+ delete[] exp;
+ delete[] val;
+}
+
+
namespace v8 {
class Value;
template class Handle;
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 6841c21612..f9913b9c15 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -351,42 +351,18 @@ void CodeGenerator::ProcessDeclarations(ZoneList* declarations) {
}
+// List of special runtime calls which are generated inline. For some of these
+// functions the code will be generated inline, and for others a call to a code
+// stub will be inlined.
+
+#define INLINE_RUNTIME_ENTRY(Name, argc, ressize) \
+ {&CodeGenerator::Generate##Name, "_" #Name, argc}, \
-// Special cases: These 'runtime calls' manipulate the current
-// frame and are only used 1 or two places, so we generate them
-// inline instead of generating calls to them. They are used
-// for implementing Function.prototype.call() and
-// Function.prototype.apply().
CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
- {&CodeGenerator::GenerateIsSmi, "_IsSmi"},
- {&CodeGenerator::GenerateIsNonNegativeSmi, "_IsNonNegativeSmi"},
- {&CodeGenerator::GenerateIsArray, "_IsArray"},
- {&CodeGenerator::GenerateIsRegExp, "_IsRegExp"},
- {&CodeGenerator::GenerateIsConstructCall, "_IsConstructCall"},
- {&CodeGenerator::GenerateArgumentsLength, "_ArgumentsLength"},
- {&CodeGenerator::GenerateArgumentsAccess, "_Arguments"},
- {&CodeGenerator::GenerateClassOf, "_ClassOf"},
- {&CodeGenerator::GenerateValueOf, "_ValueOf"},
- {&CodeGenerator::GenerateSetValueOf, "_SetValueOf"},
- {&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"},
- {&CodeGenerator::GenerateCharFromCode, "_CharFromCode"},
- {&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
- {&CodeGenerator::GenerateLog, "_Log"},
- {&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
- {&CodeGenerator::GenerateIsObject, "_IsObject"},
- {&CodeGenerator::GenerateIsFunction, "_IsFunction"},
- {&CodeGenerator::GenerateIsUndetectableObject, "_IsUndetectableObject"},
- {&CodeGenerator::GenerateStringAdd, "_StringAdd"},
- {&CodeGenerator::GenerateSubString, "_SubString"},
- {&CodeGenerator::GenerateStringCompare, "_StringCompare"},
- {&CodeGenerator::GenerateRegExpExec, "_RegExpExec"},
- {&CodeGenerator::GenerateNumberToString, "_NumberToString"},
- {&CodeGenerator::GenerateMathPow, "_Math_pow"},
- {&CodeGenerator::GenerateMathSin, "_Math_sin"},
- {&CodeGenerator::GenerateMathCos, "_Math_cos"},
- {&CodeGenerator::GenerateMathSqrt, "_Math_sqrt"},
+ INLINE_RUNTIME_FUNCTION_LIST(INLINE_RUNTIME_ENTRY)
};
+#undef INLINE_RUNTIME_ENTRY
CodeGenerator::InlineRuntimeLUT* CodeGenerator::FindInlineRuntimeLUT(
Handle name) {
@@ -431,6 +407,14 @@ bool CodeGenerator::PatchInlineRuntimeEntry(Handle name,
}
+int CodeGenerator::InlineRuntimeCallArgumentsCount(Handle name) {
+ CodeGenerator::InlineRuntimeLUT* f =
+ CodeGenerator::FindInlineRuntimeLUT(name);
+ if (f != NULL) return f->nargs;
+ return -1;
+}
+
+
// Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
// known result for the test expression, with no side effects.
CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 8dcde84bbd..40ed6cefe6 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -99,6 +99,36 @@ namespace v8 {
namespace internal {
+#define INLINE_RUNTIME_FUNCTION_LIST(F) \
+ F(IsSmi, 1, 1) \
+ F(IsNonNegativeSmi, 1, 1) \
+ F(IsArray, 1, 1) \
+ F(IsRegExp, 1, 1) \
+ F(IsConstructCall, 0, 1) \
+ F(ArgumentsLength, 0, 1) \
+ F(Arguments, 1, 1) \
+ F(ClassOf, 1, 1) \
+ F(ValueOf, 1, 1) \
+ F(SetValueOf, 2, 1) \
+ F(FastCharCodeAt, 2, 1) \
+ F(CharFromCode, 1, 1) \
+ F(ObjectEquals, 2, 1) \
+ F(Log, 3, 1) \
+ F(RandomPositiveSmi, 0, 1) \
+ F(IsObject, 1, 1) \
+ F(IsFunction, 1, 1) \
+ F(IsUndetectableObject, 1, 1) \
+ F(StringAdd, 2, 1) \
+ F(SubString, 3, 1) \
+ F(StringCompare, 2, 1) \
+ F(RegExpExec, 4, 1) \
+ F(NumberToString, 1, 1) \
+ F(MathPow, 2, 1) \
+ F(MathSin, 1, 1) \
+ F(MathCos, 1, 1) \
+ F(MathSqrt, 1, 1)
+
+
// Support for "structured" code comments.
#ifdef DEBUG
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 9dcbeb5eeb..378a24e750 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -218,9 +218,7 @@ void CompilationSubCache::Iterate(ObjectVisitor* v) {
void CompilationSubCache::Clear() {
- for (int i = 0; i < generations_; i++) {
- tables_[i] = Heap::undefined_value();
- }
+ MemsetPointer(tables_, Heap::undefined_value(), generations_);
}
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index ebb62f11c2..dce881639b 100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -79,12 +79,32 @@ static Handle MakeCode(Handle context, CompilationInfo* info) {
return Handle::null();
}
+ if (function->scope()->num_parameters() > 0 ||
+ function->scope()->num_stack_slots()) {
+ AssignedVariablesAnalyzer ava(function);
+ ava.Analyze();
+ if (ava.HasStackOverflow()) {
+ return Handle::null();
+ }
+ }
+
if (FLAG_use_flow_graph) {
FlowGraphBuilder builder;
builder.Build(function);
+ if (!builder.HasStackOverflow()) {
+ int variable_count =
+ function->num_parameters() + function->scope()->num_stack_slots();
+ if (variable_count > 0 && builder.definitions()->length() > 0) {
+ ReachingDefinitions rd(builder.postorder(),
+ builder.definitions(),
+ variable_count);
+ rd.Compute();
+ }
+ }
+
#ifdef DEBUG
- if (FLAG_print_graph_text) {
+ if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
builder.graph()->PrintText(builder.postorder());
}
#endif
@@ -463,12 +483,32 @@ Handle Compiler::BuildBoilerplate(FunctionLiteral* literal,
return Handle::null();
}
+ if (literal->scope()->num_parameters() > 0 ||
+ literal->scope()->num_stack_slots()) {
+ AssignedVariablesAnalyzer ava(literal);
+ ava.Analyze();
+ if (ava.HasStackOverflow()) {
+ return Handle::null();
+ }
+ }
+
if (FLAG_use_flow_graph) {
FlowGraphBuilder builder;
builder.Build(literal);
+ if (!builder.HasStackOverflow()) {
+ int variable_count =
+ literal->num_parameters() + literal->scope()->num_stack_slots();
+ if (variable_count > 0 && builder.definitions()->length() > 0) {
+ ReachingDefinitions rd(builder.postorder(),
+ builder.definitions(),
+ variable_count);
+ rd.Compute();
+ }
+ }
+
#ifdef DEBUG
- if (FLAG_print_graph_text) {
+ if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
builder.graph()->PrintText(builder.postorder());
}
#endif
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 98ebc479f7..499774172a 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -50,12 +50,6 @@ enum ContextLookupFlags {
// must always be allocated via Heap::AllocateContext() or
// Factory::NewContext.
-// Comment for special_function_table:
-// Table for providing optimized/specialized functions.
-// The array contains triplets [object, general_function, optimized_function].
-// Primarily added to support built-in optimized variants of
-// Array.prototype.{push,pop}.
-
#define GLOBAL_CONTEXT_FIELDS(V) \
V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
@@ -82,7 +76,6 @@ enum ContextLookupFlags {
V(FUNCTION_MAP_INDEX, Map, function_map) \
V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\
- V(SPECIAL_FUNCTION_TABLE_INDEX, FixedArray, special_function_table) \
V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
@@ -206,7 +199,6 @@ class Context: public FixedArray {
GLOBAL_EVAL_FUN_INDEX,
INSTANTIATE_FUN_INDEX,
CONFIGURE_INSTANCE_FUN_INDEX,
- SPECIAL_FUNCTION_TABLE_INDEX,
MESSAGE_LISTENERS_INDEX,
MAKE_MESSAGE_FUN_INDEX,
GET_STACK_TRACE_LINE_INDEX,
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index fd6d38d84d..864b6259c0 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -31,6 +31,7 @@
#include "conversions-inl.h"
#include "factory.h"
+#include "grisu3.h"
#include "scanner.h"
namespace v8 {
@@ -382,8 +383,17 @@ const char* DoubleToCString(double v, Vector buffer) {
int decimal_point;
int sign;
- char* decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
- int length = StrLength(decimal_rep);
+ char* decimal_rep;
+ bool used_dtoa = false;
+ char grisu_buffer[kGrisu3MaximalLength + 1];
+ int length;
+ if (grisu3(v, grisu_buffer, &sign, &length, &decimal_point)) {
+ decimal_rep = grisu_buffer;
+ } else {
+ decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
+ used_dtoa = true;
+ length = StrLength(decimal_rep);
+ }
if (sign) builder.AddCharacter('-');
@@ -418,7 +428,7 @@ const char* DoubleToCString(double v, Vector buffer) {
builder.AddFormatted("%d", exponent);
}
- freedtoa(decimal_rep);
+ if (used_dtoa) freedtoa(decimal_rep);
}
}
return builder.Finalize();
diff --git a/deps/v8/src/data-flow.cc b/deps/v8/src/data-flow.cc
index 6b45da02b9..f6ccef1a13 100644
--- a/deps/v8/src/data-flow.cc
+++ b/deps/v8/src/data-flow.cc
@@ -28,73 +28,76 @@
#include "v8.h"
#include "data-flow.h"
+#include "scopes.h"
namespace v8 {
namespace internal {
void FlowGraph::AppendInstruction(AstNode* instruction) {
+ // Add a (non-null) AstNode to the end of the graph fragment.
ASSERT(instruction != NULL);
- if (is_empty() || !exit()->IsBlockNode()) {
- AppendNode(new BlockNode());
- }
+ if (exit()->IsExitNode()) return;
+ if (!exit()->IsBlockNode()) AppendNode(new BlockNode());
BlockNode::cast(exit())->AddInstruction(instruction);
}
void FlowGraph::AppendNode(Node* node) {
+ // Add a node to the end of the graph. An empty block is added to
+ // maintain edge-split form (that no join nodes or exit nodes as
+ // successors to branch nodes).
ASSERT(node != NULL);
- if (is_empty()) {
- entry_ = exit_ = node;
- } else {
- exit()->AddSuccessor(node);
- node->AddPredecessor(exit());
- exit_ = node;
+ if (exit()->IsExitNode()) return;
+ if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) {
+ AppendNode(new BlockNode());
}
+ exit()->AddSuccessor(node);
+ node->AddPredecessor(exit());
+ exit_ = node;
}
void FlowGraph::AppendGraph(FlowGraph* graph) {
- ASSERT(!graph->is_empty());
- if (is_empty()) {
- entry_ = graph->entry();
- exit_ = graph->exit();
- } else {
- exit()->AddSuccessor(graph->entry());
- graph->entry()->AddPredecessor(exit());
- exit_ = graph->exit();
+ // Add a flow graph fragment to the end of this one. An empty block is
+ // added to maintain edge-split form (that no join nodes or exit nodes as
+ // successors to branch nodes).
+ ASSERT(graph != NULL);
+ if (exit()->IsExitNode()) return;
+ Node* node = graph->entry();
+ if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) {
+ AppendNode(new BlockNode());
}
+ exit()->AddSuccessor(node);
+ node->AddPredecessor(exit());
+ exit_ = graph->exit();
}
void FlowGraph::Split(BranchNode* branch,
FlowGraph* left,
FlowGraph* right,
- JoinNode* merge) {
- // Graphs are in edge split form. Add empty blocks if necessary.
- if (left->is_empty()) left->AppendNode(new BlockNode());
- if (right->is_empty()) right->AppendNode(new BlockNode());
-
- // Add the branch, left flowgraph and merge.
+ JoinNode* join) {
+ // Add the branch node, left flowgraph, join node.
AppendNode(branch);
AppendGraph(left);
- AppendNode(merge);
+ AppendNode(join);
// Splice in the right flowgraph.
- right->AppendNode(merge);
+ right->AppendNode(join);
branch->AddSuccessor(right->entry());
right->entry()->AddPredecessor(branch);
}
-void FlowGraph::Loop(JoinNode* merge,
+void FlowGraph::Loop(JoinNode* join,
FlowGraph* condition,
BranchNode* branch,
FlowGraph* body) {
- // Add the merge, condition and branch. Add merge's predecessors in
+ // Add the join, condition and branch. Add join's predecessors in
// left-to-right order.
- AppendNode(merge);
- body->AppendNode(merge);
+ AppendNode(join);
+ body->AppendNode(join);
AppendGraph(condition);
AppendNode(branch);
@@ -104,19 +107,6 @@ void FlowGraph::Loop(JoinNode* merge,
}
-void EntryNode::Traverse(bool mark,
- ZoneList* preorder,
- ZoneList* postorder) {
- ASSERT(successor_ != NULL);
- preorder->Add(this);
- if (!successor_->IsMarkedWith(mark)) {
- successor_->MarkWith(mark);
- successor_->Traverse(mark, preorder, postorder);
- }
- postorder->Add(this);
-}
-
-
void ExitNode::Traverse(bool mark,
ZoneList* preorder,
ZoneList* postorder) {
@@ -143,14 +133,14 @@ void BranchNode::Traverse(bool mark,
ZoneList* postorder) {
ASSERT(successor0_ != NULL && successor1_ != NULL);
preorder->Add(this);
- if (!successor0_->IsMarkedWith(mark)) {
- successor0_->MarkWith(mark);
- successor0_->Traverse(mark, preorder, postorder);
- }
if (!successor1_->IsMarkedWith(mark)) {
successor1_->MarkWith(mark);
successor1_->Traverse(mark, preorder, postorder);
}
+ if (!successor0_->IsMarkedWith(mark)) {
+ successor0_->MarkWith(mark);
+ successor0_->Traverse(mark, preorder, postorder);
+ }
postorder->Add(this);
}
@@ -169,16 +159,15 @@ void JoinNode::Traverse(bool mark,
void FlowGraphBuilder::Build(FunctionLiteral* lit) {
- graph_ = FlowGraph::Empty();
- graph_.AppendNode(new EntryNode());
global_exit_ = new ExitNode();
VisitStatements(lit->body());
- if (HasStackOverflow()) {
- graph_ = FlowGraph::Empty();
- return;
- }
+ if (HasStackOverflow()) return;
+ // The graph can end with a branch node (if the function ended with a
+ // loop). Maintain edge-split form (no join nodes or exit nodes as
+ // successors to branch nodes).
+ if (graph_.exit()->IsBranchNode()) graph_.AppendNode(new BlockNode());
graph_.AppendNode(global_exit_);
// Build preorder and postorder traversal orders. All the nodes in
@@ -222,6 +211,7 @@ void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) {
graph_ = FlowGraph::Empty();
Visit(stmt->else_statement());
+ if (HasStackOverflow()) return;
JoinNode* join = new JoinNode();
original.Split(branch, &left, &graph_, join);
graph_ = original;
@@ -239,20 +229,17 @@ void FlowGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
void FlowGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
- Visit(stmt->expression());
- graph_.AppendInstruction(stmt);
- graph_.AppendNode(global_exit());
+ SetStackOverflow();
}
void FlowGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
- Visit(stmt->expression());
- graph_.AppendInstruction(stmt);
+ SetStackOverflow();
}
void FlowGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
- graph_.AppendInstruction(stmt);
+ SetStackOverflow();
}
@@ -262,44 +249,12 @@ void FlowGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
void FlowGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
- JoinNode* join = new JoinNode();
- FlowGraph original = graph_;
- graph_ = FlowGraph::Empty();
- Visit(stmt->body());
-
- FlowGraph body = graph_;
- graph_ = FlowGraph::Empty();
- Visit(stmt->cond());
-
- BranchNode* branch = new BranchNode();
-
- // Add body, condition and branch.
- original.AppendNode(join);
- original.AppendGraph(&body);
- original.AppendGraph(&graph_); // The condition.
- original.AppendNode(branch);
-
- // Tie the knot.
- branch->AddSuccessor(join);
- join->AddPredecessor(branch);
-
- graph_ = original;
+ SetStackOverflow();
}
void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
- JoinNode* join = new JoinNode();
- FlowGraph original = graph_;
- graph_ = FlowGraph::Empty();
- Visit(stmt->cond());
-
- BranchNode* branch = new BranchNode();
- FlowGraph condition = graph_;
- graph_ = FlowGraph::Empty();
- Visit(stmt->body());
-
- original.Loop(join, &condition, branch, &graph_);
- graph_ = original;
+ SetStackOverflow();
}
@@ -318,23 +273,14 @@ void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
if (stmt->next() != NULL) Visit(stmt->next());
+ if (HasStackOverflow()) return;
original.Loop(join, &condition, branch, &graph_);
graph_ = original;
}
void FlowGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
- Visit(stmt->enumerable());
-
- JoinNode* join = new JoinNode();
- FlowGraph empty;
- BranchNode* branch = new BranchNode();
- FlowGraph original = graph_;
- graph_ = FlowGraph::Empty();
- Visit(stmt->body());
-
- original.Loop(join, &empty, branch, &graph_);
- graph_ = original;
+ SetStackOverflow();
}
@@ -349,36 +295,23 @@ void FlowGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
void FlowGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- graph_.AppendInstruction(stmt);
+ SetStackOverflow();
}
void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
- graph_.AppendInstruction(expr);
+ SetStackOverflow();
}
void FlowGraphBuilder::VisitFunctionBoilerplateLiteral(
FunctionBoilerplateLiteral* expr) {
- graph_.AppendInstruction(expr);
+ SetStackOverflow();
}
void FlowGraphBuilder::VisitConditional(Conditional* expr) {
- Visit(expr->condition());
-
- BranchNode* branch = new BranchNode();
- FlowGraph original = graph_;
- graph_ = FlowGraph::Empty();
- Visit(expr->then_expression());
-
- FlowGraph left = graph_;
- graph_ = FlowGraph::Empty();
- Visit(expr->else_expression());
-
- JoinNode* join = new JoinNode();
- original.Split(branch, &left, &graph_, join);
- graph_ = original;
+ SetStackOverflow();
}
@@ -398,30 +331,22 @@ void FlowGraphBuilder::VisitLiteral(Literal* expr) {
void FlowGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
- graph_.AppendInstruction(expr);
+ SetStackOverflow();
}
void FlowGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
- ZoneList* properties = expr->properties();
- for (int i = 0, len = properties->length(); i < len; i++) {
- Visit(properties->at(i)->value());
- }
- graph_.AppendInstruction(expr);
+ SetStackOverflow();
}
void FlowGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
- ZoneList* values = expr->values();
- for (int i = 0, len = values->length(); i < len; i++) {
- Visit(values->at(i));
- }
- graph_.AppendInstruction(expr);
+ SetStackOverflow();
}
void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- graph_.AppendInstruction(expr);
+ SetStackOverflow();
}
@@ -433,26 +358,32 @@ void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
ASSERT(var == NULL || prop == NULL);
if (var != NULL) {
Visit(expr->value());
- if (var->IsStackAllocated()) definitions_.Add(expr);
+ if (var->IsStackAllocated()) {
+ expr->set_num(definitions_.length());
+ definitions_.Add(expr);
+ }
} else if (prop != NULL) {
Visit(prop->obj());
if (!prop->key()->IsPropertyName()) Visit(prop->key());
Visit(expr->value());
}
+
+ if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
}
void FlowGraphBuilder::VisitThrow(Throw* expr) {
- Visit(expr->exception());
- graph_.AppendInstruction(expr);
+ SetStackOverflow();
}
void FlowGraphBuilder::VisitProperty(Property* expr) {
Visit(expr->obj());
if (!expr->key()->IsPropertyName()) Visit(expr->key());
+
+ if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
}
@@ -463,32 +394,42 @@ void FlowGraphBuilder::VisitCall(Call* expr) {
for (int i = 0, len = arguments->length(); i < len; i++) {
Visit(arguments->at(i));
}
+
+ if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
}
void FlowGraphBuilder::VisitCallNew(CallNew* expr) {
- Visit(expr->expression());
- ZoneList* arguments = expr->arguments();
- for (int i = 0, len = arguments->length(); i < len; i++) {
- Visit(arguments->at(i));
- }
- graph_.AppendInstruction(expr);
+ SetStackOverflow();
}
void FlowGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
- ZoneList* arguments = expr->arguments();
- for (int i = 0, len = arguments->length(); i < len; i++) {
- Visit(arguments->at(i));
- }
- graph_.AppendInstruction(expr);
+ SetStackOverflow();
}
void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
- Visit(expr->expression());
- graph_.AppendInstruction(expr);
+ switch (expr->op()) {
+ case Token::NOT:
+ case Token::BIT_NOT:
+ case Token::DELETE:
+ case Token::TYPEOF:
+ case Token::VOID:
+ SetStackOverflow();
+ break;
+
+ case Token::ADD:
+ case Token::SUB:
+ Visit(expr->expression());
+ if (HasStackOverflow()) return;
+ graph_.AppendInstruction(expr);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
}
@@ -496,56 +437,37 @@ void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) {
Visit(expr->expression());
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
if (var != NULL && var->IsStackAllocated()) {
+ expr->set_num(definitions_.length());
definitions_.Add(expr);
}
+
+ if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
}
void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
- Visit(expr->left());
-
switch (expr->op()) {
case Token::COMMA:
- Visit(expr->right());
- break;
-
- case Token::OR: {
- BranchNode* branch = new BranchNode();
- FlowGraph original = graph_;
- graph_ = FlowGraph::Empty();
- Visit(expr->right());
- FlowGraph empty;
- JoinNode* join = new JoinNode();
- original.Split(branch, &empty, &graph_, join);
- graph_ = original;
+ case Token::OR:
+ case Token::AND:
+ SetStackOverflow();
break;
- }
-
- case Token::AND: {
- BranchNode* branch = new BranchNode();
- FlowGraph original = graph_;
- graph_ = FlowGraph::Empty();
- Visit(expr->right());
- FlowGraph empty;
- JoinNode* join = new JoinNode();
- original.Split(branch, &graph_, &empty, join);
- graph_ = original;
- break;
- }
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND:
case Token::SHL:
- case Token::SAR:
case Token::SHR:
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD:
+ case Token::SAR:
+ Visit(expr->left());
Visit(expr->right());
+ if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
break;
@@ -556,14 +478,34 @@ void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
- Visit(expr->left());
- Visit(expr->right());
- graph_.AppendInstruction(expr);
+ switch (expr->op()) {
+ case Token::EQ:
+ case Token::NE:
+ case Token::EQ_STRICT:
+ case Token::NE_STRICT:
+ case Token::INSTANCEOF:
+ case Token::IN:
+ SetStackOverflow();
+ break;
+
+ case Token::LT:
+ case Token::GT:
+ case Token::LTE:
+ case Token::GTE:
+ Visit(expr->left());
+ Visit(expr->right());
+ if (HasStackOverflow()) return;
+ graph_.AppendInstruction(expr);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
}
void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) {
- graph_.AppendInstruction(expr);
+ SetStackOverflow();
}
@@ -1098,6 +1040,451 @@ void LivenessAnalyzer::VisitThisFunction(ThisFunction* expr) {
}
+AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(FunctionLiteral* fun)
+ : fun_(fun),
+ av_(fun->scope()->num_parameters() + fun->scope()->num_stack_slots()) {}
+
+
+void AssignedVariablesAnalyzer::Analyze() {
+ ASSERT(av_.length() > 0);
+ VisitStatements(fun_->body());
+}
+
+
+Variable* AssignedVariablesAnalyzer::FindSmiLoopVariable(ForStatement* stmt) {
+ // The loop must have all necessary parts.
+ if (stmt->init() == NULL || stmt->cond() == NULL || stmt->next() == NULL) {
+ return NULL;
+ }
+ // The initialization statement has to be a simple assignment.
+ Assignment* init = stmt->init()->StatementAsSimpleAssignment();
+ if (init == NULL) return NULL;
+
+ // We only deal with local variables.
+ Variable* loop_var = init->target()->AsVariableProxy()->AsVariable();
+ if (loop_var == NULL || !loop_var->IsStackAllocated()) return NULL;
+
+ // The initial value has to be a smi.
+ Literal* init_lit = init->value()->AsLiteral();
+ if (init_lit == NULL || !init_lit->handle()->IsSmi()) return NULL;
+ int init_value = Smi::cast(*init_lit->handle())->value();
+
+ // The condition must be a compare of variable with <, <=, >, or >=.
+ CompareOperation* cond = stmt->cond()->AsCompareOperation();
+ if (cond == NULL) return NULL;
+ if (cond->op() != Token::LT
+ && cond->op() != Token::LTE
+ && cond->op() != Token::GT
+ && cond->op() != Token::GTE) return NULL;
+
+ // The lhs must be the same variable as in the init expression.
+ if (cond->left()->AsVariableProxy()->AsVariable() != loop_var) return NULL;
+
+ // The rhs must be a smi.
+ Literal* term_lit = cond->right()->AsLiteral();
+ if (term_lit == NULL || !term_lit->handle()->IsSmi()) return NULL;
+ int term_value = Smi::cast(*term_lit->handle())->value();
+
+ // The count operation updates the same variable as in the init expression.
+ CountOperation* update = stmt->next()->StatementAsCountOperation();
+ if (update == NULL) return NULL;
+ if (update->expression()->AsVariableProxy()->AsVariable() != loop_var) {
+ return NULL;
+ }
+
+ // The direction of the count operation must agree with the start and the end
+ // value. We currently do not allow the initial value to be the same as the
+ // terminal value. This _would_ be ok as long as the loop body never executes
+ // or executes exactly one time.
+ if (init_value == term_value) return NULL;
+ if (init_value < term_value && update->op() != Token::INC) return NULL;
+ if (init_value > term_value && update->op() != Token::DEC) return NULL;
+
+ // Found a smi loop variable.
+ return loop_var;
+}
+
+int AssignedVariablesAnalyzer::BitIndex(Variable* var) {
+ ASSERT(var != NULL);
+ ASSERT(var->IsStackAllocated());
+ Slot* slot = var->slot();
+ if (slot->type() == Slot::PARAMETER) {
+ return slot->index();
+ } else {
+ return fun_->scope()->num_parameters() + slot->index();
+ }
+}
+
+
+void AssignedVariablesAnalyzer::RecordAssignedVar(Variable* var) {
+ ASSERT(var != NULL);
+ if (var->IsStackAllocated()) {
+ av_.Add(BitIndex(var));
+ }
+}
+
+
+void AssignedVariablesAnalyzer::MarkIfTrivial(Expression* expr) {
+ Variable* var = expr->AsVariableProxy()->AsVariable();
+ if (var != NULL &&
+ var->IsStackAllocated() &&
+ !var->is_arguments() &&
+ var->mode() != Variable::CONST &&
+ (var->is_this() || !av_.Contains(BitIndex(var)))) {
+ expr->AsVariableProxy()->set_is_trivial(true);
+ }
+}
+
+
+void AssignedVariablesAnalyzer::ProcessExpression(Expression* expr) {
+ BitVector saved_av(av_);
+ av_.Clear();
+ Visit(expr);
+ av_.Union(saved_av);
+}
+
+void AssignedVariablesAnalyzer::VisitBlock(Block* stmt) {
+ VisitStatements(stmt->statements());
+}
+
+
+void AssignedVariablesAnalyzer::VisitExpressionStatement(
+ ExpressionStatement* stmt) {
+ ProcessExpression(stmt->expression());
+}
+
+
+void AssignedVariablesAnalyzer::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Do nothing.
+}
+
+
+void AssignedVariablesAnalyzer::VisitIfStatement(IfStatement* stmt) {
+ ProcessExpression(stmt->condition());
+ Visit(stmt->then_statement());
+ Visit(stmt->else_statement());
+}
+
+
+void AssignedVariablesAnalyzer::VisitContinueStatement(
+ ContinueStatement* stmt) {
+ // Nothing to do.
+}
+
+
+void AssignedVariablesAnalyzer::VisitBreakStatement(BreakStatement* stmt) {
+ // Nothing to do.
+}
+
+
+void AssignedVariablesAnalyzer::VisitReturnStatement(ReturnStatement* stmt) {
+ ProcessExpression(stmt->expression());
+}
+
+
+void AssignedVariablesAnalyzer::VisitWithEnterStatement(
+ WithEnterStatement* stmt) {
+ ProcessExpression(stmt->expression());
+}
+
+
+void AssignedVariablesAnalyzer::VisitWithExitStatement(
+ WithExitStatement* stmt) {
+ // Nothing to do.
+}
+
+
+void AssignedVariablesAnalyzer::VisitSwitchStatement(SwitchStatement* stmt) {
+ BitVector result(av_);
+ av_.Clear();
+ Visit(stmt->tag());
+ result.Union(av_);
+ for (int i = 0; i < stmt->cases()->length(); i++) {
+ CaseClause* clause = stmt->cases()->at(i);
+ if (!clause->is_default()) {
+ av_.Clear();
+ Visit(clause->label());
+ result.Union(av_);
+ }
+ VisitStatements(clause->statements());
+ }
+ av_.Union(result);
+}
+
+
+void AssignedVariablesAnalyzer::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ ProcessExpression(stmt->cond());
+ Visit(stmt->body());
+}
+
+
+void AssignedVariablesAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
+ ProcessExpression(stmt->cond());
+ Visit(stmt->body());
+}
+
+
+void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) {
+ if (stmt->init() != NULL) Visit(stmt->init());
+
+ if (stmt->cond() != NULL) ProcessExpression(stmt->cond());
+
+ if (stmt->next() != NULL) Visit(stmt->next());
+
+ // Process loop body. After visiting the loop body av_ contains
+ // the assigned variables of the loop body.
+ BitVector saved_av(av_);
+ av_.Clear();
+ Visit(stmt->body());
+
+ Variable* var = FindSmiLoopVariable(stmt);
+ if (var != NULL && !av_.Contains(BitIndex(var))) {
+ stmt->set_loop_variable(var);
+ }
+
+ av_.Union(saved_av);
+}
+
+
+void AssignedVariablesAnalyzer::VisitForInStatement(ForInStatement* stmt) {
+ ProcessExpression(stmt->each());
+ ProcessExpression(stmt->enumerable());
+ Visit(stmt->body());
+}
+
+
+void AssignedVariablesAnalyzer::VisitTryCatchStatement(
+ TryCatchStatement* stmt) {
+ Visit(stmt->try_block());
+ Visit(stmt->catch_block());
+}
+
+
+void AssignedVariablesAnalyzer::VisitTryFinallyStatement(
+ TryFinallyStatement* stmt) {
+ Visit(stmt->try_block());
+ Visit(stmt->finally_block());
+}
+
+
+void AssignedVariablesAnalyzer::VisitDebuggerStatement(
+ DebuggerStatement* stmt) {
+ // Nothing to do.
+}
+
+
+void AssignedVariablesAnalyzer::VisitFunctionLiteral(FunctionLiteral* expr) {
+ // Nothing to do.
+ ASSERT(av_.IsEmpty());
+}
+
+
+void AssignedVariablesAnalyzer::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* expr) {
+ // Nothing to do.
+ ASSERT(av_.IsEmpty());
+}
+
+
+void AssignedVariablesAnalyzer::VisitConditional(Conditional* expr) {
+ ASSERT(av_.IsEmpty());
+
+ Visit(expr->condition());
+
+ BitVector result(av_);
+ av_.Clear();
+ Visit(expr->then_expression());
+ result.Union(av_);
+
+ av_.Clear();
+ Visit(expr->else_expression());
+ av_.Union(result);
+}
+
+
+void AssignedVariablesAnalyzer::VisitSlot(Slot* expr) {
+ UNREACHABLE();
+}
+
+
+void AssignedVariablesAnalyzer::VisitVariableProxy(VariableProxy* expr) {
+ // Nothing to do.
+ ASSERT(av_.IsEmpty());
+}
+
+
+void AssignedVariablesAnalyzer::VisitLiteral(Literal* expr) {
+ // Nothing to do.
+ ASSERT(av_.IsEmpty());
+}
+
+
+void AssignedVariablesAnalyzer::VisitRegExpLiteral(RegExpLiteral* expr) {
+ // Nothing to do.
+ ASSERT(av_.IsEmpty());
+}
+
+
+void AssignedVariablesAnalyzer::VisitObjectLiteral(ObjectLiteral* expr) {
+ ASSERT(av_.IsEmpty());
+ BitVector result(av_.length());
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ Visit(expr->properties()->at(i)->value());
+ result.Union(av_);
+ av_.Clear();
+ }
+ av_.CopyFrom(result);
+}
+
+
+void AssignedVariablesAnalyzer::VisitArrayLiteral(ArrayLiteral* expr) {
+ ASSERT(av_.IsEmpty());
+ BitVector result(av_.length());
+ for (int i = 0; i < expr->values()->length(); i++) {
+ Visit(expr->values()->at(i));
+ result.Union(av_);
+ av_.Clear();
+ }
+ av_.CopyFrom(result);
+}
+
+
+void AssignedVariablesAnalyzer::VisitCatchExtensionObject(
+ CatchExtensionObject* expr) {
+ ASSERT(av_.IsEmpty());
+ Visit(expr->key());
+ ProcessExpression(expr->value());
+}
+
+
+void AssignedVariablesAnalyzer::VisitAssignment(Assignment* expr) {
+ ASSERT(av_.IsEmpty());
+
+ if (expr->target()->AsProperty() != NULL) {
+ // Visit receiver and key of property store and rhs.
+ Visit(expr->target()->AsProperty()->obj());
+ ProcessExpression(expr->target()->AsProperty()->key());
+ ProcessExpression(expr->value());
+
+ // If we have a variable as a receiver in a property store, check if
+ // we can mark it as trivial.
+ MarkIfTrivial(expr->target()->AsProperty()->obj());
+ } else {
+ Visit(expr->target());
+ ProcessExpression(expr->value());
+
+ Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+ if (var != NULL) RecordAssignedVar(var);
+ }
+}
+
+
+void AssignedVariablesAnalyzer::VisitThrow(Throw* expr) {
+ ASSERT(av_.IsEmpty());
+ Visit(expr->exception());
+}
+
+
+void AssignedVariablesAnalyzer::VisitProperty(Property* expr) {
+ ASSERT(av_.IsEmpty());
+ Visit(expr->obj());
+ ProcessExpression(expr->key());
+
+ // In case we have a variable as a receiver, check if we can mark
+ // it as trivial.
+ MarkIfTrivial(expr->obj());
+}
+
+
+void AssignedVariablesAnalyzer::VisitCall(Call* expr) {
+ ASSERT(av_.IsEmpty());
+ Visit(expr->expression());
+ BitVector result(av_);
+ for (int i = 0; i < expr->arguments()->length(); i++) {
+ av_.Clear();
+ Visit(expr->arguments()->at(i));
+ result.Union(av_);
+ }
+ av_.CopyFrom(result);
+}
+
+
+void AssignedVariablesAnalyzer::VisitCallNew(CallNew* expr) {
+ ASSERT(av_.IsEmpty());
+ Visit(expr->expression());
+ BitVector result(av_);
+ for (int i = 0; i < expr->arguments()->length(); i++) {
+ av_.Clear();
+ Visit(expr->arguments()->at(i));
+ result.Union(av_);
+ }
+ av_.CopyFrom(result);
+}
+
+
+void AssignedVariablesAnalyzer::VisitCallRuntime(CallRuntime* expr) {
+ ASSERT(av_.IsEmpty());
+ BitVector result(av_);
+ for (int i = 0; i < expr->arguments()->length(); i++) {
+ av_.Clear();
+ Visit(expr->arguments()->at(i));
+ result.Union(av_);
+ }
+ av_.CopyFrom(result);
+}
+
+
+void AssignedVariablesAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
+ ASSERT(av_.IsEmpty());
+ Visit(expr->expression());
+}
+
+
+void AssignedVariablesAnalyzer::VisitCountOperation(CountOperation* expr) {
+ ASSERT(av_.IsEmpty());
+
+ Visit(expr->expression());
+
+ Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+ if (var != NULL) RecordAssignedVar(var);
+}
+
+
+void AssignedVariablesAnalyzer::VisitBinaryOperation(BinaryOperation* expr) {
+ ASSERT(av_.IsEmpty());
+ Visit(expr->left());
+
+ ProcessExpression(expr->right());
+
+ // In case we have a variable on the left side, check if we can mark
+ // it as trivial.
+ MarkIfTrivial(expr->left());
+}
+
+
+void AssignedVariablesAnalyzer::VisitCompareOperation(CompareOperation* expr) {
+ ASSERT(av_.IsEmpty());
+ Visit(expr->left());
+
+ ProcessExpression(expr->right());
+
+ // In case we have a variable on the left side, check if we can mark
+ // it as trivial.
+ MarkIfTrivial(expr->left());
+}
+
+
+void AssignedVariablesAnalyzer::VisitThisFunction(ThisFunction* expr) {
+ // Nothing to do.
+ ASSERT(av_.IsEmpty());
+}
+
+
+void AssignedVariablesAnalyzer::VisitDeclaration(Declaration* decl) {
+ UNREACHABLE();
+}
+
+
#ifdef DEBUG
// Print a textual representation of an instruction in a flow graph. Using
@@ -1105,7 +1492,10 @@ void LivenessAnalyzer::VisitThisFunction(ThisFunction* expr) {
// only used for printing in debug mode.
class TextInstructionPrinter: public AstVisitor {
public:
- TextInstructionPrinter() {}
+ TextInstructionPrinter() : number_(0) {}
+
+ int NextNumber() { return number_; }
+ void AssignNumber(AstNode* node) { node->set_num(number_++); }
private:
// AST node visit functions.
@@ -1113,6 +1503,8 @@ class TextInstructionPrinter: public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
+ int number_;
+
DISALLOW_COPY_AND_ASSIGN(TextInstructionPrinter);
};
@@ -1233,8 +1625,7 @@ void TextInstructionPrinter::VisitSlot(Slot* expr) {
void TextInstructionPrinter::VisitVariableProxy(VariableProxy* expr) {
Variable* var = expr->AsVariable();
if (var != NULL) {
- SmartPointer name = var->name()->ToCString();
- PrintF("%s", *name);
+ PrintF("%s", *var->name()->ToCString());
} else {
ASSERT(expr->AsProperty() != NULL);
VisitProperty(expr->AsProperty());
@@ -1273,9 +1664,8 @@ void TextInstructionPrinter::VisitAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
if (var != NULL) {
- SmartPointer name = var->name()->ToCString();
PrintF("%s %s @%d",
- *name,
+ *var->name()->ToCString(),
Token::String(expr->op()),
expr->value()->num());
} else if (prop != NULL) {
@@ -1297,6 +1687,10 @@ void TextInstructionPrinter::VisitAssignment(Assignment* expr) {
// Throw reference error.
Visit(expr->target());
}
+
+ if (expr->num() != AstNode::kNoNumber) {
+ PrintF(" ;; D%d", expr->num());
+ }
}
@@ -1339,8 +1733,7 @@ void TextInstructionPrinter::VisitCallNew(CallNew* expr) {
void TextInstructionPrinter::VisitCallRuntime(CallRuntime* expr) {
- SmartPointer name = expr->name()->ToCString();
- PrintF("%s(", *name);
+ PrintF("%s(", *expr->name()->ToCString());
ZoneList* arguments = expr->arguments();
for (int i = 0, len = arguments->length(); i < len; i++) {
if (i != 0) PrintF(", ");
@@ -1361,6 +1754,10 @@ void TextInstructionPrinter::VisitCountOperation(CountOperation* expr) {
} else {
PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op()));
}
+
+ if (expr->num() != AstNode::kNoNumber) {
+ PrintF(" ;; D%d", expr->num());
+ }
}
@@ -1392,36 +1789,66 @@ static int node_count = 0;
static int instruction_count = 0;
-void Node::AssignNumbers() {
+void Node::AssignNodeNumber() {
set_number(node_count++);
}
-void BlockNode::AssignNumbers() {
- set_number(node_count++);
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- instructions_[i]->set_num(instruction_count++);
+void Node::PrintReachingDefinitions() {
+ if (rd_.rd_in() != NULL) {
+ ASSERT(rd_.kill() != NULL && rd_.gen() != NULL);
+
+ PrintF("RD_in = {");
+ bool first = true;
+ for (int i = 0; i < rd_.rd_in()->length(); i++) {
+ if (rd_.rd_in()->Contains(i)) {
+ if (!first) PrintF(",");
+ PrintF("%d", i);
+ first = false;
+ }
+ }
+ PrintF("}\n");
+
+ PrintF("RD_kill = {");
+ first = true;
+ for (int i = 0; i < rd_.kill()->length(); i++) {
+ if (rd_.kill()->Contains(i)) {
+ if (!first) PrintF(",");
+ PrintF("%d", i);
+ first = false;
+ }
+ }
+ PrintF("}\n");
+
+ PrintF("RD_gen = {");
+ first = true;
+ for (int i = 0; i < rd_.gen()->length(); i++) {
+ if (rd_.gen()->Contains(i)) {
+ if (!first) PrintF(",");
+ PrintF("%d", i);
+ first = false;
+ }
+ }
+ PrintF("}\n");
}
}
-void EntryNode::PrintText() {
- PrintF("L%d: Entry\n", number());
- PrintF("goto L%d\n\n", successor_->number());
-}
-
void ExitNode::PrintText() {
+ PrintReachingDefinitions();
PrintF("L%d: Exit\n\n", number());
}
void BlockNode::PrintText() {
+ PrintReachingDefinitions();
// Print the instructions in the block.
PrintF("L%d: Block\n", number());
TextInstructionPrinter printer;
for (int i = 0, len = instructions_.length(); i < len; i++) {
- PrintF("%d ", instructions_[i]->num());
+ PrintF("%d ", printer.NextNumber());
printer.Visit(instructions_[i]);
+ printer.AssignNumber(instructions_[i]);
PrintF("\n");
}
PrintF("goto L%d\n\n", successor_->number());
@@ -1429,12 +1856,14 @@ void BlockNode::PrintText() {
void BranchNode::PrintText() {
+ PrintReachingDefinitions();
PrintF("L%d: Branch\n", number());
PrintF("goto (L%d, L%d)\n\n", successor0_->number(), successor1_->number());
}
void JoinNode::PrintText() {
+ PrintReachingDefinitions();
PrintF("L%d: Join(", number());
for (int i = 0, len = predecessors_.length(); i < len; i++) {
if (i != 0) PrintF(", ");
@@ -1451,7 +1880,7 @@ void FlowGraph::PrintText(ZoneList* postorder) {
node_count = 0;
instruction_count = 0;
for (int i = postorder->length() - 1; i >= 0; i--) {
- postorder->at(i)->AssignNumbers();
+ postorder->at(i)->AssignNodeNumber();
}
// Print basic blocks in reverse postorder.
@@ -1464,4 +1893,231 @@ void FlowGraph::PrintText(ZoneList* postorder) {
#endif // defined(DEBUG)
+int ReachingDefinitions::IndexFor(Variable* var, int variable_count) {
+ // Parameters are numbered left-to-right from the beginning of the bit
+ // set. Stack-allocated locals are allocated right-to-left from the end.
+ ASSERT(var != NULL && var->IsStackAllocated());
+ Slot* slot = var->slot();
+ if (slot->type() == Slot::PARAMETER) {
+ return slot->index();
+ } else {
+ return (variable_count - 1) - slot->index();
+ }
+}
+
+
+void Node::InitializeReachingDefinitions(int definition_count,
+ List* variables,
+ WorkList* worklist,
+ bool mark) {
+ ASSERT(!IsMarkedWith(mark));
+ rd_.Initialize(definition_count);
+ MarkWith(mark);
+ worklist->Insert(this);
+}
+
+
+void BlockNode::InitializeReachingDefinitions(int definition_count,
+ List* variables,
+ WorkList* worklist,
+ bool mark) {
+ ASSERT(!IsMarkedWith(mark));
+ int instruction_count = instructions_.length();
+ int variable_count = variables->length();
+
+ rd_.Initialize(definition_count);
+
+ for (int i = 0; i < instruction_count; i++) {
+ Expression* expr = instructions_[i]->AsExpression();
+ if (expr == NULL) continue;
+ Variable* var = expr->AssignedVar();
+ if (var == NULL || !var->IsStackAllocated()) continue;
+
+ // All definitions of this variable are killed.
+ BitVector* def_set =
+ variables->at(ReachingDefinitions::IndexFor(var, variable_count));
+ rd_.kill()->Union(*def_set);
+
+ // All previously generated definitions are not generated.
+ rd_.gen()->Subtract(*def_set);
+
+ // This one is generated.
+ rd_.gen()->Add(expr->num());
+ }
+
+ // Add all blocks except the entry node to the worklist.
+ if (predecessor_ != NULL) {
+ MarkWith(mark);
+ worklist->Insert(this);
+ }
+}
+
+
+void ExitNode::ComputeRDOut(BitVector* result) {
+ // Should not be the predecessor of any node.
+ UNREACHABLE();
+}
+
+
+void BlockNode::ComputeRDOut(BitVector* result) {
+ // All definitions reaching this block ...
+ result->CopyFrom(*rd_.rd_in());
+ // ... except those killed by the block ...
+ result->Subtract(*rd_.kill());
+ // ... but including those generated by the block.
+ result->Union(*rd_.gen());
+}
+
+
+void BranchNode::ComputeRDOut(BitVector* result) {
+ // Branch nodes don't kill or generate definitions.
+ result->CopyFrom(*rd_.rd_in());
+}
+
+
+void JoinNode::ComputeRDOut(BitVector* result) {
+ // Join nodes don't kill or generate definitions.
+ result->CopyFrom(*rd_.rd_in());
+}
+
+
+void ExitNode::UpdateRDIn(WorkList* worklist, bool mark) {
+ // The exit node has no successors so we can just update in place. New
+ // RD_in is the union over all predecessors.
+ int definition_count = rd_.rd_in()->length();
+ rd_.rd_in()->Clear();
+
+ BitVector temp(definition_count);
+ for (int i = 0, len = predecessors_.length(); i < len; i++) {
+ // Because ComputeRDOut always overwrites temp and its value is
+ // always read out before calling ComputeRDOut again, we do not
+ // have to clear it on each iteration of the loop.
+ predecessors_[i]->ComputeRDOut(&temp);
+ rd_.rd_in()->Union(temp);
+ }
+}
+
+
+void BlockNode::UpdateRDIn(WorkList* worklist, bool mark) {
+ // The entry block has no predecessor. Its RD_in does not change.
+ if (predecessor_ == NULL) return;
+
+ BitVector new_rd_in(rd_.rd_in()->length());
+ predecessor_->ComputeRDOut(&new_rd_in);
+
+ if (rd_.rd_in()->Equals(new_rd_in)) return;
+
+ // Update RD_in.
+ rd_.rd_in()->CopyFrom(new_rd_in);
+ // Add the successor to the worklist if not already present.
+ if (!successor_->IsMarkedWith(mark)) {
+ successor_->MarkWith(mark);
+ worklist->Insert(successor_);
+ }
+}
+
+
+void BranchNode::UpdateRDIn(WorkList* worklist, bool mark) {
+ BitVector new_rd_in(rd_.rd_in()->length());
+ predecessor_->ComputeRDOut(&new_rd_in);
+
+ if (rd_.rd_in()->Equals(new_rd_in)) return;
+
+ // Update RD_in.
+ rd_.rd_in()->CopyFrom(new_rd_in);
+ // Add the successors to the worklist if not already present.
+ if (!successor0_->IsMarkedWith(mark)) {
+ successor0_->MarkWith(mark);
+ worklist->Insert(successor0_);
+ }
+ if (!successor1_->IsMarkedWith(mark)) {
+ successor1_->MarkWith(mark);
+ worklist->Insert(successor1_);
+ }
+}
+
+
+void JoinNode::UpdateRDIn(WorkList* worklist, bool mark) {
+ int definition_count = rd_.rd_in()->length();
+ BitVector new_rd_in(definition_count);
+
+ // New RD_in is the union over all predecessors.
+ BitVector temp(definition_count);
+ for (int i = 0, len = predecessors_.length(); i < len; i++) {
+ predecessors_[i]->ComputeRDOut(&temp);
+ new_rd_in.Union(temp);
+ }
+
+ if (rd_.rd_in()->Equals(new_rd_in)) return;
+
+ // Update RD_in.
+ rd_.rd_in()->CopyFrom(new_rd_in);
+ // Add the successor to the worklist if not already present.
+ if (!successor_->IsMarkedWith(mark)) {
+ successor_->MarkWith(mark);
+ worklist->Insert(successor_);
+ }
+}
+
+
+void ReachingDefinitions::Compute() {
+ ASSERT(!definitions_->is_empty());
+
+ int variable_count = variables_.length();
+ int definition_count = definitions_->length();
+ int node_count = postorder_->length();
+
+ // Step 1: For each variable, identify the set of all its definitions in
+ // the body.
+ for (int i = 0; i < definition_count; i++) {
+ Variable* var = definitions_->at(i)->AssignedVar();
+ variables_[IndexFor(var, variable_count)]->Add(i);
+ }
+
+ if (FLAG_print_graph_text) {
+ for (int i = 0; i < variable_count; i++) {
+ BitVector* def_set = variables_[i];
+ if (!def_set->IsEmpty()) {
+ // At least one definition.
+ bool first = true;
+ for (int j = 0; j < definition_count; j++) {
+ if (def_set->Contains(j)) {
+ if (first) {
+ Variable* var = definitions_->at(j)->AssignedVar();
+ ASSERT(var != NULL);
+ PrintF("Def[%s] = {%d", *var->name()->ToCString(), j);
+ first = false;
+ } else {
+ PrintF(", %d", j);
+ }
+ }
+ }
+ PrintF("}\n");
+ }
+ }
+ }
+
+ // Step 2: Compute KILL and GEN for each block node, initialize RD_in for
+ // all nodes, and mark and add all nodes to the worklist in reverse
+ // postorder. All nodes should currently have the same mark.
+ bool mark = postorder_->at(0)->IsMarkedWith(false); // Negation of current.
+ WorkList worklist(node_count);
+ for (int i = node_count - 1; i >= 0; i--) {
+ postorder_->at(i)->InitializeReachingDefinitions(definition_count,
+ &variables_,
+ &worklist,
+ mark);
+ }
+
+ // Step 3: Until the worklist is empty, remove an item compute and update
+ // its rd_in based on its predecessor's rd_out. If rd_in has changed, add
+ // all necessary successors to the worklist.
+ while (!worklist.is_empty()) {
+ Node* node = worklist.Remove();
+ node->MarkWith(!mark);
+ node->UpdateRDIn(&worklist, mark);
+ }
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h
index 2dc2d73275..236d0ad441 100644
--- a/deps/v8/src/data-flow.h
+++ b/deps/v8/src/data-flow.h
@@ -100,6 +100,13 @@ class BitVector: public ZoneObject {
}
}
+ void Subtract(const BitVector& other) {
+ ASSERT(other.length() == length());
+ for (int i = 0; i < data_length_; i++) {
+ data_[i] &= ~other.data_[i];
+ }
+ }
+
void Clear() {
for (int i = 0; i < data_length_; i++) {
data_[i] = 0;
@@ -113,6 +120,13 @@ class BitVector: public ZoneObject {
return true;
}
+ bool Equals(const BitVector& other) {
+ for (int i = 0; i < data_length_; i++) {
+ if (data_[i] != other.data_[i]) return false;
+ }
+ return true;
+ }
+
int length() const { return length_; }
private:
@@ -122,56 +136,68 @@ class BitVector: public ZoneObject {
};
-// Forward declarations of Node types.
-class Node;
-class BranchNode;
-class JoinNode;
-
-// Flow graphs have a single entry and single exit. The empty flowgraph is
-// represented by both entry and exit being NULL.
-class FlowGraph BASE_EMBEDDED {
+// Simple fixed-capacity list-based worklist (managed as a queue) of
+// pointers to T.
+template
+class WorkList BASE_EMBEDDED {
public:
- FlowGraph() : entry_(NULL), exit_(NULL) {}
+ // The worklist cannot grow bigger than size. We keep one item empty to
+ // distinguish between empty and full.
+ explicit WorkList(int size)
+ : capacity_(size + 1), head_(0), tail_(0), queue_(capacity_) {
+ for (int i = 0; i < capacity_; i++) queue_.Add(NULL);
+ }
- static FlowGraph Empty() { return FlowGraph(); }
+ bool is_empty() { return head_ == tail_; }
- bool is_empty() const { return entry_ == NULL; }
- Node* entry() const { return entry_; }
- Node* exit() const { return exit_; }
+ bool is_full() {
+ // The worklist is full if head is at 0 and tail is at capacity - 1:
+ // head == 0 && tail == capacity-1 ==> tail - head == capacity - 1
+ // or if tail is immediately to the left of head:
+ // tail+1 == head ==> tail - head == -1
+ int diff = tail_ - head_;
+ return (diff == -1 || diff == capacity_ - 1);
+ }
- // Add a single instruction to the end of this flowgraph.
- void AppendInstruction(AstNode* instruction);
+ void Insert(T* item) {
+ ASSERT(!is_full());
+ queue_[tail_++] = item;
+ if (tail_ == capacity_) tail_ = 0;
+ }
- // Add a single node to the end of this flow graph.
- void AppendNode(Node* node);
+ T* Remove() {
+ ASSERT(!is_empty());
+ T* item = queue_[head_++];
+ if (head_ == capacity_) head_ = 0;
+ return item;
+ }
- // Add a flow graph fragment to the end of this one.
- void AppendGraph(FlowGraph* graph);
+ private:
+ int capacity_; // Including one empty slot.
+ int head_; // Where the first item is.
+ int tail_; // Where the next inserted item will go.
+ List queue_;
+};
- // Concatenate an if-then-else flow-graph to this one. Control is split
- // and merged, so the graph remains single-entry, single-exit.
- void Split(BranchNode* branch,
- FlowGraph* left,
- FlowGraph* right,
- JoinNode* merge);
- // Concatenate a forward loop (e.g., while or for loop) flow-graph to this
- // one. Control is split by the condition and merged back from the back
- // edge at end of the body to the beginning of the condition. The single
- // (free) exit of the result graph is the right (false) arm of the branch
- // node.
- void Loop(JoinNode* merge,
- FlowGraph* condition,
- BranchNode* branch,
- FlowGraph* body);
+struct ReachingDefinitionsData BASE_EMBEDDED {
+ public:
+ ReachingDefinitionsData() : rd_in_(NULL), kill_(NULL), gen_(NULL) {}
-#ifdef DEBUG
- void PrintText(ZoneList* postorder);
-#endif
+ void Initialize(int definition_count) {
+ rd_in_ = new BitVector(definition_count);
+ kill_ = new BitVector(definition_count);
+ gen_ = new BitVector(definition_count);
+ }
+
+ BitVector* rd_in() { return rd_in_; }
+ BitVector* kill() { return kill_; }
+ BitVector* gen() { return gen_; }
private:
- Node* entry_;
- Node* exit_;
+ BitVector* rd_in_;
+ BitVector* kill_;
+ BitVector* gen_;
};
@@ -182,7 +208,9 @@ class Node: public ZoneObject {
virtual ~Node() {}
+ virtual bool IsExitNode() { return false; }
virtual bool IsBlockNode() { return false; }
+ virtual bool IsBranchNode() { return false; }
virtual bool IsJoinNode() { return false; }
virtual void AddPredecessor(Node* predecessor) = 0;
@@ -200,11 +228,23 @@ class Node: public ZoneObject {
int number() { return number_; }
void set_number(int number) { number_ = number; }
+ // Functions used by data-flow analyses.
+ virtual void InitializeReachingDefinitions(int definition_count,
+ List* variables,
+ WorkList* worklist,
+ bool mark);
+ virtual void ComputeRDOut(BitVector* result) = 0;
+ virtual void UpdateRDIn(WorkList* worklist, bool mark) = 0;
+
#ifdef DEBUG
- virtual void AssignNumbers();
+ void AssignNodeNumber();
+ void PrintReachingDefinitions();
virtual void PrintText() = 0;
#endif
+ protected:
+ ReachingDefinitionsData rd_;
+
private:
int number_;
bool mark_;
@@ -213,49 +253,27 @@ class Node: public ZoneObject {
};
-// An entry node has no predecessors and a single successor.
-class EntryNode: public Node {
- public:
- EntryNode() : successor_(NULL) {}
-
- void AddPredecessor(Node* predecessor) { UNREACHABLE(); }
-
- void AddSuccessor(Node* successor) {
- ASSERT(successor_ == NULL && successor != NULL);
- successor_ = successor;
- }
-
- void Traverse(bool mark,
- ZoneList* preorder,
- ZoneList* postorder);
-
-#ifdef DEBUG
- void PrintText();
-#endif
-
- private:
- Node* successor_;
-
- DISALLOW_COPY_AND_ASSIGN(EntryNode);
-};
-
-
// An exit node has a arbitrarily many predecessors and no successors.
class ExitNode: public Node {
public:
ExitNode() : predecessors_(4) {}
+ bool IsExitNode() { return true; }
+
void AddPredecessor(Node* predecessor) {
ASSERT(predecessor != NULL);
predecessors_.Add(predecessor);
}
- void AddSuccessor(Node* successor) { /* Do nothing. */ }
+ void AddSuccessor(Node* successor) { UNREACHABLE(); }
void Traverse(bool mark,
ZoneList* preorder,
ZoneList* postorder);
+ void ComputeRDOut(BitVector* result);
+ void UpdateRDIn(WorkList* worklist, bool mark);
+
#ifdef DEBUG
void PrintText();
#endif
@@ -280,6 +298,8 @@ class BlockNode: public Node {
bool IsBlockNode() { return true; }
+ bool is_empty() { return instructions_.is_empty(); }
+
void AddPredecessor(Node* predecessor) {
ASSERT(predecessor_ == NULL && predecessor != NULL);
predecessor_ = predecessor;
@@ -298,8 +318,14 @@ class BlockNode: public Node {
ZoneList* preorder,
ZoneList* postorder);
+ void InitializeReachingDefinitions(int definition_count,
+ List* variables,
+ WorkList* worklist,
+ bool mark);
+ void ComputeRDOut(BitVector* result);
+ void UpdateRDIn(WorkList* worklist, bool mark);
+
#ifdef DEBUG
- void AssignNumbers();
void PrintText();
#endif
@@ -317,6 +343,8 @@ class BranchNode: public Node {
public:
BranchNode() : predecessor_(NULL), successor0_(NULL), successor1_(NULL) {}
+ bool IsBranchNode() { return true; }
+
void AddPredecessor(Node* predecessor) {
ASSERT(predecessor_ == NULL && predecessor != NULL);
predecessor_ = predecessor;
@@ -335,6 +363,9 @@ class BranchNode: public Node {
ZoneList* preorder,
ZoneList* postorder);
+ void ComputeRDOut(BitVector* result);
+ void UpdateRDIn(WorkList* worklist, bool mark);
+
#ifdef DEBUG
void PrintText();
#endif
@@ -374,6 +405,9 @@ class JoinNode: public Node {
ZoneList* preorder,
ZoneList* postorder);
+ void ComputeRDOut(BitVector* result);
+ void UpdateRDIn(WorkList* worklist, bool mark);
+
#ifdef DEBUG
void PrintText();
#endif
@@ -386,12 +420,68 @@ class JoinNode: public Node {
};
+// Flow graphs have a single entry and single exit. The empty flowgraph is
+// represented by both entry and exit being NULL.
+class FlowGraph BASE_EMBEDDED {
+ public:
+ static FlowGraph Empty() {
+ FlowGraph graph;
+ graph.entry_ = new BlockNode();
+ graph.exit_ = graph.entry_;
+ return graph;
+ }
+
+ bool is_empty() const {
+ return entry_ == exit_ && BlockNode::cast(entry_)->is_empty();
+ }
+ Node* entry() const { return entry_; }
+ Node* exit() const { return exit_; }
+
+ // Add a single instruction to the end of this flowgraph.
+ void AppendInstruction(AstNode* instruction);
+
+ // Add a single node to the end of this flow graph.
+ void AppendNode(Node* node);
+
+ // Add a flow graph fragment to the end of this one.
+ void AppendGraph(FlowGraph* graph);
+
+ // Concatenate an if-then-else flow-graph to this one. Control is split
+ // and merged, so the graph remains single-entry, single-exit.
+ void Split(BranchNode* branch,
+ FlowGraph* left,
+ FlowGraph* right,
+ JoinNode* merge);
+
+ // Concatenate a forward loop (e.g., while or for loop) flow-graph to this
+ // one. Control is split by the condition and merged back from the back
+ // edge at end of the body to the beginning of the condition. The single
+ // (free) exit of the result graph is the right (false) arm of the branch
+ // node.
+ void Loop(JoinNode* merge,
+ FlowGraph* condition,
+ BranchNode* branch,
+ FlowGraph* body);
+
+#ifdef DEBUG
+ void PrintText(ZoneList* postorder);
+#endif
+
+ private:
+ FlowGraph() : entry_(NULL), exit_(NULL) {}
+
+ Node* entry_;
+ Node* exit_;
+};
+
+
// Construct a flow graph from a function literal. Build pre- and postorder
// traversal orders as a byproduct.
class FlowGraphBuilder: public AstVisitor {
public:
FlowGraphBuilder()
- : global_exit_(NULL),
+ : graph_(FlowGraph::Empty()),
+ global_exit_(NULL),
preorder_(4),
postorder_(4),
definitions_(4) {
@@ -400,8 +490,8 @@ class FlowGraphBuilder: public AstVisitor {
void Build(FunctionLiteral* lit);
FlowGraph* graph() { return &graph_; }
-
ZoneList* postorder() { return &postorder_; }
+ ZoneList* definitions() { return &definitions_; }
private:
ExitNode* global_exit() { return global_exit_; }
@@ -418,8 +508,9 @@ class FlowGraphBuilder: public AstVisitor {
// The flow graph builder collects a list of definitions (assignments and
// count operations) to stack-allocated variables to use for reaching
- // definitions analysis.
- ZoneList definitions_;
+ // definitions analysis. AST node numbers in the AST are used to refer
+ // into this list.
+ ZoneList definitions_;
DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
};
@@ -502,6 +593,74 @@ class LivenessAnalyzer : public AstVisitor {
};
+// Computes the set of assigned variables and annotates variables proxies
+// that are trivial sub-expressions and for-loops where the loop variable
+// is guaranteed to be a smi.
+class AssignedVariablesAnalyzer : public AstVisitor {
+ public:
+ explicit AssignedVariablesAnalyzer(FunctionLiteral* fun);
+
+ void Analyze();
+
+ private:
+ Variable* FindSmiLoopVariable(ForStatement* stmt);
+
+ int BitIndex(Variable* var);
+
+ void RecordAssignedVar(Variable* var);
+
+ void MarkIfTrivial(Expression* expr);
+
+ // Visits an expression saving the accumulator before, clearing
+ // it before visiting and restoring it after visiting.
+ void ProcessExpression(Expression* expr);
+
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ FunctionLiteral* fun_;
+
+ // Accumulator for assigned variables set.
+ BitVector av_;
+
+ DISALLOW_COPY_AND_ASSIGN(AssignedVariablesAnalyzer);
+};
+
+
+class ReachingDefinitions BASE_EMBEDDED {
+ public:
+ ReachingDefinitions(ZoneList* postorder,
+ ZoneList* definitions,
+ int variable_count)
+ : postorder_(postorder),
+ definitions_(definitions),
+ variables_(variable_count) {
+ int definition_count = definitions->length();
+ for (int i = 0; i < variable_count; i++) {
+ variables_.Add(new BitVector(definition_count));
+ }
+ }
+
+ static int IndexFor(Variable* var, int variable_count);
+
+ void Compute();
+
+ private:
+ // A (postorder) list of flow-graph nodes in the body.
+ ZoneList* postorder_;
+
+ // A list of all the definitions in the body.
+ ZoneList* definitions_;
+
+ // For each variable, the set of all its definitions.
+ List variables_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReachingDefinitions);
+};
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/date-delay.js b/deps/v8/src/date-delay.js
index c0180c28c2..f2ea1eca57 100644
--- a/deps/v8/src/date-delay.js
+++ b/deps/v8/src/date-delay.js
@@ -293,55 +293,48 @@ function CalculateDateTable() {
}
-// Constructor for creating objects holding year, month, and date.
-// Introduced to ensure the two return points in FromJulianDay match same map.
-function DayTriplet(year, month, date) {
- this.year = year;
- this.month = month;
- this.date = date;
-}
-
-var julian_day_cache_triplet;
-var julian_day_cache_day = $NaN;
-
-// Compute year, month, and day from modified Julian day.
-// The missing days in 1582 are ignored for JavaScript compatibility.
-function FromJulianDay(julian) {
- if (julian_day_cache_day == julian) {
- return julian_day_cache_triplet;
+var ymd_from_time_cache = [$NaN, $NaN, $NaN];
+var ymd_from_time_cached_time = $NaN;
+
+function YearFromTime(t) {
+ if (t !== ymd_from_time_cached_time) {
+ // Limits according to ECMA 262 15.9.1.1
+ if (!$isFinite(t) || t < -8640000000000000 || t > 8640000000000000) {
+ return $NaN;
+ }
+
+ %DateYMDFromTime(t, ymd_from_time_cache);
+ ymd_from_time_cached_time = t
}
- var result;
- // Avoid floating point and non-Smi maths in common case. This is also a period of
- // time where leap years are very regular. The range is not too large to avoid overflow
- // when doing the multiply-to-divide trick.
- if (julian > kDayZeroInJulianDay &&
- (julian - kDayZeroInJulianDay) < 40177) { // 1970 - 2080
- var jsimple = (julian - kDayZeroInJulianDay) + 731; // Day 0 is 1st January 1968
- var y = 1968;
- // Divide by 1461 by multiplying with 22967 and shifting down by 25!
- var after_1968 = (jsimple * 22967) >> 25;
- y += after_1968 << 2;
- jsimple -= 1461 * after_1968;
- var four_year_cycle = four_year_cycle_table[jsimple];
- result = new DayTriplet(y + (four_year_cycle >> kYearShift),
- (four_year_cycle & kMonthMask) >> kMonthShift,
- four_year_cycle & kDayMask);
- } else {
- var jalpha = FLOOR((julian - 1867216.25) / 36524.25);
- var jb = julian + 1 + jalpha - FLOOR(0.25 * jalpha) + 1524;
- var jc = FLOOR(6680.0 + ((jb-2439870) - 122.1)/365.25);
- var jd = FLOOR(365 * jc + (0.25 * jc));
- var je = FLOOR((jb - jd)/30.6001);
- var m = je - 1;
- if (m > 12) m -= 13;
- var y = jc - 4715;
- if (m > 2) { --y; --m; }
- var d = jb - jd - FLOOR(30.6001 * je);
- result = new DayTriplet(y, m, d);
+
+ return ymd_from_time_cache[0];
+}
+
+function MonthFromTime(t) {
+ if (t !== ymd_from_time_cached_time) {
+ // Limits according to ECMA 262 15.9.1.1
+ if (!$isFinite(t) || t < -8640000000000000 || t > 8640000000000000) {
+ return $NaN;
+ }
+ %DateYMDFromTime(t, ymd_from_time_cache);
+ ymd_from_time_cached_time = t
}
- julian_day_cache_day = julian;
- julian_day_cache_triplet = result;
- return result;
+
+ return ymd_from_time_cache[1];
+}
+
+function DateFromTime(t) {
+ if (t !== ymd_from_time_cached_time) {
+ // Limits according to ECMA 262 15.9.1.1
+ if (!$isFinite(t) || t < -8640000000000000 || t > 8640000000000000) {
+ return $NaN;
+ }
+
+ %DateYMDFromTime(t, ymd_from_time_cache);
+ ymd_from_time_cached_time = t
+ }
+
+ return ymd_from_time_cache[2];
}
@@ -577,11 +570,10 @@ function TwoDigitString(value) {
function DateString(time) {
- var YMD = FromJulianDay(DAY(time) + kDayZeroInJulianDay);
return WeekDays[WeekDay(time)] + ' '
- + Months[YMD.month] + ' '
- + TwoDigitString(YMD.date) + ' '
- + YMD.year;
+ + Months[MonthFromTime(time)] + ' '
+ + TwoDigitString(DateFromTime(time)) + ' '
+ + YearFromTime(time);
}
@@ -590,11 +582,10 @@ var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July'
function LongDateString(time) {
- var YMD = FromJulianDay(DAY(time) + kDayZeroInJulianDay);
return LongWeekDays[WeekDay(time)] + ', '
- + LongMonths[YMD.month] + ' '
- + TwoDigitString(YMD.date) + ', '
- + YMD.year;
+ + LongMonths[MonthFromTime(time)] + ' '
+ + TwoDigitString(DateFromTime(time)) + ', '
+ + YearFromTime(time);
}
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 959bea14de..81752e64e2 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -803,7 +803,7 @@ void Debug::PreemptionWhileInDebugger() {
void Debug::Iterate(ObjectVisitor* v) {
- v->VisitPointer(bit_cast(&(debug_break_return_)));
+ v->VisitPointer(BitCast(&(debug_break_return_)));
}
diff --git a/deps/v8/src/diy_fp.h b/deps/v8/src/diy_fp.h
new file mode 100644
index 0000000000..9cd7003683
--- /dev/null
+++ b/deps/v8/src/diy_fp.h
@@ -0,0 +1,136 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DIY_FP_H_
+#define V8_DIY_FP_H_
+
+namespace v8 {
+namespace internal {
+
+// This "Do It Yourself Floating Point" class implements a floating-point number
+// with a uint64 significand and an int exponent. Normalized DiyFp numbers will
+// have the most significant bit of the significand set.
+// Multiplication and Subtraction do not normalize their results.
+// DiyFp are not designed to contain special doubles (NaN and Infinity).
+class DiyFp {
+ public:
+ static const int kSignificandSize = 64;
+
+ DiyFp() : f_(0), e_(0) {}
+ DiyFp(uint64_t f, int e) : f_(f), e_(e) {}
+
+ // this = this - other.
+ // The exponents of both numbers must be the same and the significand of this
+ // must be bigger than the significand of other.
+ // The result will not be normalized.
+ void Subtract(const DiyFp& other) {
+ ASSERT(e_ == other.e_);
+ ASSERT(f_ >= other.f_);
+ f_ -= other.f_;
+ }
+
+ // Returns a - b.
+ // The exponents of both numbers must be the same and this must be bigger
+ // than other. The result will not be normalized.
+ static DiyFp Minus(const DiyFp& a, const DiyFp& b) {
+ DiyFp result = a;
+ result.Subtract(b);
+ return result;
+ }
+
+
+ // this = this * other.
+ void Multiply(const DiyFp& other) {
+ // Simply "emulates" a 128 bit multiplication.
+ // However: the resulting number only contains 64 bits. The least
+ // significant 64 bits are only used for rounding the most significant 64
+ // bits.
+ const uint64_t kM32 = 0xFFFFFFFFu;
+ uint64_t a = f_ >> 32;
+ uint64_t b = f_ & kM32;
+ uint64_t c = other.f_ >> 32;
+ uint64_t d = other.f_ & kM32;
+ uint64_t ac = a * c;
+ uint64_t bc = b * c;
+ uint64_t ad = a * d;
+ uint64_t bd = b * d;
+ uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32);
+ tmp += 1U << 31; // round
+ uint64_t result_f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
+ e_ += other.e_ + 64;
+ f_ = result_f;
+ }
+
+ // returns a * b;
+ static DiyFp Times(const DiyFp& a, const DiyFp& b) {
+ DiyFp result = a;
+ result.Multiply(b);
+ return result;
+ }
+
+ void Normalize() {
+ ASSERT(f_ != 0);
+ uint64_t f = f_;
+ int e = e_;
+
+ // This method is mainly called for normalizing boundaries. In general
+ // boundaries need to be shifted by 10 bits. We thus optimize for this case.
+ const uint64_t k10MSBits = V8_2PART_UINT64_C(0xFFC00000, 00000000);
+ while ((f & k10MSBits) == 0) {
+ f <<= 10;
+ e -= 10;
+ }
+ while ((f & kUint64MSB) == 0) {
+ f <<= 1;
+ e--;
+ }
+ f_ = f;
+ e_ = e;
+ }
+
+ static DiyFp Normalize(const DiyFp& a) {
+ DiyFp result = a;
+ result.Normalize();
+ return result;
+ }
+
+ uint64_t f() const { return f_; }
+ int e() const { return e_; }
+
+ void set_f(uint64_t new_value) { f_ = new_value; }
+ void set_e(int new_value) { e_ = new_value; }
+
+ private:
+ static const uint64_t kUint64MSB = V8_2PART_UINT64_C(0x80000000, 00000000);
+
+ uint64_t f_;
+ int e_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_DIY_FP_H_
diff --git a/deps/v8/src/double.h b/deps/v8/src/double.h
new file mode 100644
index 0000000000..f3d17b93f9
--- /dev/null
+++ b/deps/v8/src/double.h
@@ -0,0 +1,169 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DOUBLE_H_
+#define V8_DOUBLE_H_
+
+#include "diy_fp.h"
+
+namespace v8 {
+namespace internal {
+
+// We assume that doubles and uint64_t have the same endianness.
+static uint64_t double_to_uint64(double d) { return BitCast(d); }
+static double uint64_to_double(uint64_t d64) { return BitCast(d64); }
+
+// Helper functions for doubles.
+class Double {
+ public:
+ static const uint64_t kSignMask = V8_2PART_UINT64_C(0x80000000, 00000000);
+ static const uint64_t kExponentMask = V8_2PART_UINT64_C(0x7FF00000, 00000000);
+ static const uint64_t kSignificandMask =
+ V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
+ static const uint64_t kHiddenBit = V8_2PART_UINT64_C(0x00100000, 00000000);
+
+ Double() : d64_(0) {}
+ explicit Double(double d) : d64_(double_to_uint64(d)) {}
+ explicit Double(uint64_t d64) : d64_(d64) {}
+
+ DiyFp AsDiyFp() const {
+ ASSERT(!IsSpecial());
+ return DiyFp(Significand(), Exponent());
+ }
+
+ // this->Significand() must not be 0.
+ DiyFp AsNormalizedDiyFp() const {
+ uint64_t f = Significand();
+ int e = Exponent();
+
+ ASSERT(f != 0);
+
+ // The current double could be a denormal.
+ while ((f & kHiddenBit) == 0) {
+ f <<= 1;
+ e--;
+ }
+ // Do the final shifts in one go. Don't forget the hidden bit (the '-1').
+ f <<= DiyFp::kSignificandSize - kSignificandSize - 1;
+ e -= DiyFp::kSignificandSize - kSignificandSize - 1;
+ return DiyFp(f, e);
+ }
+
+ // Returns the double's bits as a uint64.
+ uint64_t AsUint64() const {
+ return d64_;
+ }
+
+ int Exponent() const {
+ if (IsDenormal()) return kDenormalExponent;
+
+ uint64_t d64 = AsUint64();
+ int biased_e = static_cast((d64 & kExponentMask) >> kSignificandSize);
+ return biased_e - kExponentBias;
+ }
+
+ uint64_t Significand() const {
+ uint64_t d64 = AsUint64();
+ uint64_t significand = d64 & kSignificandMask;
+ if (!IsDenormal()) {
+ return significand + kHiddenBit;
+ } else {
+ return significand;
+ }
+ }
+
+ // Returns true if the double is a denormal.
+ bool IsDenormal() const {
+ uint64_t d64 = AsUint64();
+ return (d64 & kExponentMask) == 0;
+ }
+
+ // We consider denormals not to be special.
+ // Hence only Infinity and NaN are special.
+ bool IsSpecial() const {
+ uint64_t d64 = AsUint64();
+ return (d64 & kExponentMask) == kExponentMask;
+ }
+
+ bool IsNan() const {
+ uint64_t d64 = AsUint64();
+ return ((d64 & kExponentMask) == kExponentMask) &&
+ ((d64 & kSignificandMask) != 0);
+ }
+
+
+ bool IsInfinite() const {
+ uint64_t d64 = AsUint64();
+ return ((d64 & kExponentMask) == kExponentMask) &&
+ ((d64 & kSignificandMask) == 0);
+ }
+
+
+ int Sign() const {
+ uint64_t d64 = AsUint64();
+ return (d64 & kSignMask) == 0? 1: -1;
+ }
+
+
+ // Returns the two boundaries of this.
+ // The bigger boundary (m_plus) is normalized. The lower boundary has the same
+ // exponent as m_plus.
+ void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
+ DiyFp v = this->AsDiyFp();
+ bool significand_is_zero = (v.f() == kHiddenBit);
+ DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
+ DiyFp m_minus;
+ if (significand_is_zero && v.e() != kDenormalExponent) {
+ // The boundary is closer. Think of v = 1000e10 and v- = 9999e9.
+ // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
+ // at a distance of 1e8.
+ // The only exception is for the smallest normal: the largest denormal is
+ // at the same distance as its successor.
+ // Note: denormals have the same exponent as the smallest normals.
+ m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
+ } else {
+ m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
+ }
+ m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
+ m_minus.set_e(m_plus.e());
+ *out_m_plus = m_plus;
+ *out_m_minus = m_minus;
+ }
+
+ double value() const { return uint64_to_double(d64_); }
+
+ private:
+ static const int kSignificandSize = 52; // Excludes the hidden bit.
+ static const int kExponentBias = 0x3FF + kSignificandSize;
+ static const int kDenormalExponent = -kExponentBias + 1;
+
+ uint64_t d64_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_DOUBLE_H_
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 8d20749641..993e2b6ce9 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -866,6 +866,7 @@ Handle Factory::CreateApiFunction(
map->set_instance_descriptors(*array);
}
+ ASSERT(result->shared()->IsApiFunction());
return result;
}
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 36911da245..e08bde41ad 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -317,7 +317,7 @@ class Factory : public AllStatic {
#define ROOT_ACCESSOR(type, name, camel_name) \
static inline Handle name() { \
- return Handle(bit_cast( \
+ return Handle(BitCast( \
&Heap::roots_[Heap::k##camel_name##RootIndex])); \
}
ROOT_LIST(ROOT_ACCESSOR)
@@ -325,7 +325,7 @@ class Factory : public AllStatic {
#define SYMBOL_ACCESSOR(name, str) \
static inline Handle name() { \
- return Handle(bit_cast( \
+ return Handle(BitCast( \
&Heap::roots_[Heap::k##name##RootIndex])); \
}
SYMBOL_LIST(SYMBOL_ACCESSOR)
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 3840feef43..6759a3588f 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -98,6 +98,11 @@ typedef byte* Address;
#define V8_PTR_PREFIX ""
#endif // V8_HOST_ARCH_64_BIT
+// The following macro works on both 32 and 64-bit platforms.
+// Usage: instead of writing 0x1234567890123456
+// write V8_2PART_UINT64_C(0x12345678,90123456);
+#define V8_2PART_UINT64_C(a, b) (((static_cast(a) << 32) + 0x##b##u))
+
#define V8PRIxPTR V8_PTR_PREFIX "x"
#define V8PRIdPTR V8_PTR_PREFIX "d"
@@ -569,42 +574,6 @@ F FUNCTION_CAST(Address addr) {
#define INLINE(header) inline header
#endif
-// The type-based aliasing rule allows the compiler to assume that pointers of
-// different types (for some definition of different) never alias each other.
-// Thus the following code does not work:
-//
-// float f = foo();
-// int fbits = *(int*)(&f);
-//
-// The compiler 'knows' that the int pointer can't refer to f since the types
-// don't match, so the compiler may cache f in a register, leaving random data
-// in fbits. Using C++ style casts makes no difference, however a pointer to
-// char data is assumed to alias any other pointer. This is the 'memcpy
-// exception'.
-//
-// Bit_cast uses the memcpy exception to move the bits from a variable of one
-// type of a variable of another type. Of course the end result is likely to
-// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
-// will completely optimize bit_cast away.
-//
-// There is an additional use for bit_cast.
-// Recent gccs will warn when they see casts that may result in breakage due to
-// the type-based aliasing rule. If you have checked that there is no breakage
-// you can use bit_cast to cast one pointer type to another. This confuses gcc
-// enough that it can no longer see that you have cast one pointer type to
-// another thus avoiding the warning.
-template
-inline Dest bit_cast(const Source& source) {
- // Compile time assertion: sizeof(Dest) == sizeof(Source)
- // A compile error here means your Dest and Source have different sizes.
- typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
-
- Dest dest;
- memcpy(&dest, &source, sizeof(dest));
- return dest;
-}
-
-
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
diff --git a/deps/v8/src/grisu3.cc b/deps/v8/src/grisu3.cc
new file mode 100644
index 0000000000..13c493247b
--- /dev/null
+++ b/deps/v8/src/grisu3.cc
@@ -0,0 +1,494 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "grisu3.h"
+
+#include "cached_powers.h"
+#include "diy_fp.h"
+#include "double.h"
+
+namespace v8 {
+namespace internal {
+
+template
+class Grisu3 {
+ public:
+ // Provides a decimal representation of v.
+ // Returns true if it succeeds, otherwise the result can not be trusted.
+ // There will be *length digits inside the buffer (not null-terminated).
+ // If the function returns true then
+ // v == (double) (buffer * 10^decimal_exponent).
+ // The digits in the buffer are the shortest representation possible: no
+ // 0.099999999999 instead of 0.1.
+ // The last digit will be closest to the actual v. That is, even if several
+ // digits might correctly yield 'v' when read again, the closest will be
+ // computed.
+ static bool grisu3(double v,
+ char* buffer, int* length, int* decimal_exponent);
+
+ private:
+ // Rounds the buffer according to the rest.
+ // If there is too much imprecision to round then false is returned.
+ // Similarly, false is returned when the buffer is not within Delta.
+ static bool RoundWeed(char* buffer, int len, uint64_t wp_W, uint64_t Delta,
+ uint64_t rest, uint64_t ten_kappa, uint64_t ulp);
+ // Dispatches to the a specialized digit-generation routine. The chosen
+ // routine depends on w.e (which in turn depends on alpha and gamma).
+ // Currently there is only one digit-generation routine, but it would be easy
+ // to add others.
+ static bool DigitGen(DiyFp low, DiyFp w, DiyFp high,
+ char* buffer, int* len, int* kappa);
+ // Generates w's digits. The result is the shortest in the interval low-high.
+ // All DiyFp are assumed to be imprecise and this function takes this
+ // imprecision into account. If the function cannot compute the best
+ // representation (due to the imprecision) then false is returned.
+ static bool DigitGen_m60_m32(DiyFp low, DiyFp w, DiyFp high,
+ char* buffer, int* length, int* kappa);
+};
+
+
+template
+bool Grisu3::grisu3(double v,
+ char* buffer,
+ int* length,
+ int* decimal_exponent) {
+ DiyFp w = Double(v).AsNormalizedDiyFp();
+ // boundary_minus and boundary_plus are the boundaries between v and its
+ // neighbors. Any number strictly between boundary_minus and boundary_plus
+ // will round to v when read as double.
+ // Grisu3 will never output representations that lie exactly on a boundary.
+ DiyFp boundary_minus, boundary_plus;
+ Double(v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
+ ASSERT(boundary_plus.e() == w.e());
+ DiyFp ten_mk; // Cached power of ten: 10^-k
+ int mk; // -k
+ GetCachedPower(w.e() + DiyFp::kSignificandSize, alpha, gamma, &mk, &ten_mk);
+ ASSERT(alpha <= w.e() + ten_mk.e() + DiyFp::kSignificandSize &&
+ gamma >= w.e() + ten_mk.e() + DiyFp::kSignificandSize);
+ // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
+ // 64 bit significand and ten_mk is thus only precise up to 64 bits.
+
+ // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
+ // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
+ // off by a small amount.
+ // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
+ // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
+ // (f-1) * 2^e < w*10^k < (f+1) * 2^e
+ DiyFp scaled_w = DiyFp::Times(w, ten_mk);
+ ASSERT(scaled_w.e() ==
+ boundary_plus.e() + ten_mk.e() + DiyFp::kSignificandSize);
+ // In theory it would be possible to avoid some recomputations by computing
+ // the difference between w and boundary_minus/plus (a power of 2) and to
+ // compute scaled_boundary_minus/plus by subtracting/adding from
+ // scaled_w. However the code becomes much less readable and the speed
+ // enhancements are not terrific.
+ DiyFp scaled_boundary_minus = DiyFp::Times(boundary_minus, ten_mk);
+ DiyFp scaled_boundary_plus = DiyFp::Times(boundary_plus, ten_mk);
+
+ // DigitGen will generate the digits of scaled_w. Therefore we have
+ // v == (double) (scaled_w * 10^-mk).
+ // Set decimal_exponent == -mk and pass it to DigitGen. If scaled_w is not an
+ // integer then it will be updated. For instance if scaled_w == 1.23 then
+ // the buffer will be filled with "123" and the decimal_exponent will be
+ // decreased by 2.
+ int kappa;
+ bool result = DigitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus,
+ buffer, length, &kappa);
+ *decimal_exponent = -mk + kappa;
+ return result;
+}
+
+// Generates the digits of input number w.
+// w is a floating-point number (DiyFp), consisting of a significand and an
+// exponent. Its exponent is bounded by alpha and gamma. Typically alpha >= -63
+// and gamma <= 3.
+// Returns false if it fails, in which case the generated digits in the buffer
+// should not be used.
+// Preconditions:
+// * low, w and high are correct up to 1 ulp (unit in the last place). That
+// is, their error must be less than a unit of their last digits.
+// * low.e() == w.e() == high.e()
+// * low < w < high, and taking into account their error: low~ <= high~
+// * alpha <= w.e() <= gamma
+// Postconditions: returns false if procedure fails.
+// otherwise:
+// * buffer is not null-terminated, but len contains the number of digits.
+// * buffer contains the shortest possible decimal digit-sequence
+// such that LOW < buffer * 10^kappa < HIGH, where LOW and HIGH are the
+// correct values of low and high (without their error).
+// * if more than one decimal representation gives the minimal number of
+// decimal digits then the one closest to W (where W is the correct value
+// of w) is chosen.
+// Remark: this procedure takes into account the imprecision of its input
+// numbers. If the precision is not enough to guarantee all the postconditions
+// then false is returned. This usually happens rarely (~0.5%).
+template
+bool Grisu3::DigitGen(DiyFp low,
+ DiyFp w,
+ DiyFp high,
+ char* buffer,
+ int* len,
+ int* kappa) {
+ ASSERT(low.e() == w.e() && w.e() == high.e());
+ ASSERT(low.f() + 1 <= high.f() - 1);
+ ASSERT(alpha <= w.e() && w.e() <= gamma);
+ // The following tests use alpha and gamma to avoid unnecessary dynamic tests.
+ if ((alpha >= -60 && gamma <= -32) || // -60 <= w.e() <= -32
+ (alpha <= -32 && gamma >= -60 && // Alpha/gamma overlaps -60/-32 region.
+ -60 <= w.e() && w.e() <= -32)) {
+ return DigitGen_m60_m32(low, w, high, buffer, len, kappa);
+ } else {
+ // A simple adaption of the special case -60/-32 would allow greater ranges
+ // of alpha/gamma and thus reduce the number of precomputed cached powers of
+ // ten.
+ UNIMPLEMENTED();
+ return false;
+ }
+}
+
+static const uint32_t kTen4 = 10000;
+static const uint32_t kTen5 = 100000;
+static const uint32_t kTen6 = 1000000;
+static const uint32_t kTen7 = 10000000;
+static const uint32_t kTen8 = 100000000;
+static const uint32_t kTen9 = 1000000000;
+
+// Returns the biggest power of ten that is <= the given number. We
+// furthermore receive the maximum number of bits 'number' has.
+// If number_bits == 0 then 0^-1 is returned
+// The number of bits must be <= 32.
+static void BiggestPowerTen(uint32_t number,
+ int number_bits,
+ uint32_t* power,
+ int* exponent) {
+ switch (number_bits) {
+ case 32:
+ case 31:
+ case 30:
+ if (kTen9 <= number) {
+ *power = kTen9;
+ *exponent = 9;
+ break;
+ } // else fallthrough
+ case 29:
+ case 28:
+ case 27:
+ if (kTen8 <= number) {
+ *power = kTen8;
+ *exponent = 8;
+ break;
+ } // else fallthrough
+ case 26:
+ case 25:
+ case 24:
+ if (kTen7 <= number) {
+ *power = kTen7;
+ *exponent = 7;
+ break;
+ } // else fallthrough
+ case 23:
+ case 22:
+ case 21:
+ case 20:
+ if (kTen6 <= number) {
+ *power = kTen6;
+ *exponent = 6;
+ break;
+ } // else fallthrough
+ case 19:
+ case 18:
+ case 17:
+ if (kTen5 <= number) {
+ *power = kTen5;
+ *exponent = 5;
+ break;
+ } // else fallthrough
+ case 16:
+ case 15:
+ case 14:
+ if (kTen4 <= number) {
+ *power = kTen4;
+ *exponent = 4;
+ break;
+ } // else fallthrough
+ case 13:
+ case 12:
+ case 11:
+ case 10:
+ if (1000 <= number) {
+ *power = 1000;
+ *exponent = 3;
+ break;
+ } // else fallthrough
+ case 9:
+ case 8:
+ case 7:
+ if (100 <= number) {
+ *power = 100;
+ *exponent = 2;
+ break;
+ } // else fallthrough
+ case 6:
+ case 5:
+ case 4:
+ if (10 <= number) {
+ *power = 10;
+ *exponent = 1;
+ break;
+ } // else fallthrough
+ case 3:
+ case 2:
+ case 1:
+ if (1 <= number) {
+ *power = 1;
+ *exponent = 0;
+ break;
+ } // else fallthrough
+ case 0:
+ *power = 0;
+ *exponent = -1;
+ break;
+ default:
+ // Following assignments are here to silence compiler warnings.
+ *power = 0;
+ *exponent = 0;
+ UNREACHABLE();
+ }
+}
+
+
+// Same comments as for DigitGen but with additional precondition:
+// -60 <= w.e() <= -32
+//
+// Say, for the sake of example, that
+// w.e() == -48, and w.f() == 0x1234567890abcdef
+// w's value can be computed by w.f() * 2^w.e()
+// We can obtain w's integral digits by simply shifting w.f() by -w.e().
+// -> w's integral part is 0x1234
+// w's fractional part is therefore 0x567890abcdef.
+// Printing w's integral part is easy (simply print 0x1234 in decimal).
+// In order to print its fraction we repeatedly multiply the fraction by 10 and
+// get each digit. Example the first digit after the comma would be computed by
+// (0x567890abcdef * 10) >> 48. -> 3
+// The whole thing becomes slightly more complicated because we want to stop
+// once we have enough digits. That is, once the digits inside the buffer
+// represent 'w' we can stop. Everything inside the interval low - high
+// represents w. However we have to pay attention to low, high and w's
+// imprecision.
+template <int alpha, int gamma>
+bool Grisu3<alpha, gamma>::DigitGen_m60_m32(DiyFp low,
+ DiyFp w,
+ DiyFp high,
+ char* buffer,
+ int* length,
+ int* kappa) {
+ // low, w and high are imprecise, but by less than one ulp (unit in the last
+ // place).
+ // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
+ // the new numbers are outside of the interval we want the final
+ // representation to lie in.
+ // Inversely adding (resp. removing) 1 ulp from low (resp. high) would yield
+ // numbers that are certain to lie in the interval. We will use this fact
+ // later on.
+ // We will now start by generating the digits within the uncertain
+ // interval. Later we will weed out representations that lie outside the safe
+ // interval and thus _might_ lie outside the correct interval.
+ uint64_t unit = 1;
+ DiyFp too_low = DiyFp(low.f() - unit, low.e());
+ DiyFp too_high = DiyFp(high.f() + unit, high.e());
+ // too_low and too_high are guaranteed to lie outside the interval we want the
+ // generated number in.
+ DiyFp unsafe_interval = DiyFp::Minus(too_high, too_low);
+ // We now cut the input number into two parts: the integral digits and the
+ // fractionals. We will not write any decimal separator though, but adapt
+ // kappa instead.
+ // Reminder: we are currently computing the digits (stored inside the buffer)
+ // such that: too_low < buffer * 10^kappa < too_high
+ // We use too_high for the digit_generation and stop as soon as possible.
+ // If we stop early we effectively round down.
+  DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
+ // Division by one is a shift.
+  uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
+ // Modulo by one is an and.
+ uint64_t fractionals = too_high.f() & (one.f() - 1);
+ uint32_t divider;
+ int divider_exponent;
+ BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
+                  &divider, &divider_exponent);
+ *kappa = divider_exponent + 1;
+ *length = 0;
+ // Loop invariant: buffer = too_high / 10^kappa (integer division)
+ // The invariant holds for the first iteration: kappa has been initialized
+ // with the divider exponent + 1. And the divider is the biggest power of ten
+ // that is smaller than integrals.
+ while (*kappa > 0) {
+ int digit = integrals / divider;
+ buffer[*length] = '0' + digit;
+ (*length)++;
+ integrals %= divider;
+ (*kappa)--;
+ // Note that kappa now equals the exponent of the divider and that the
+ // invariant thus holds again.
+ uint64_t rest =
+ (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
+ // Invariant: too_high = buffer * 10^kappa + DiyFp(rest, one.e())
+ // Reminder: unsafe_interval.e() == one.e()
+ if (rest < unsafe_interval.f()) {
+ // Rounding down (by not emitting the remaining digits) yields a number
+ // that lies within the unsafe interval.
+ return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
+ unsafe_interval.f(), rest,
+ static_cast<uint64_t>(divider) << -one.e(), unit);
+ }
+ divider /= 10;
+ }
+
+ // The integrals have been generated. We are at the point of the decimal
+ // separator. In the following loop we simply multiply the remaining digits by
+ // 10 and divide by one. We just need to pay attention to multiply associated
+ // data (like the interval or 'unit'), too.
+ // Instead of multiplying by 10 we multiply by 5 (cheaper operation) and
+ // increase its (imaginary) exponent. At the same time we decrease the
+ // divider's (one's) exponent and shift its significand.
+ // Basically, if fractionals was a DiyFp (with fractionals.e == one.e):
+ // fractionals.f *= 10;
+ // fractionals.f >>= 1; fractionals.e++; // value remains unchanged.
+ // one.f >>= 1; one.e++; // value remains unchanged.
+ // and we have again fractionals.e == one.e which allows us to divide
+ // fractionals.f() by one.f()
+ // We simply combine the *= 10 and the >>= 1.
+ while (true) {
+ fractionals *= 5;
+ unit *= 5;
+ unsafe_interval.set_f(unsafe_interval.f() * 5);
+ unsafe_interval.set_e(unsafe_interval.e() + 1); // Will be optimized out.
+ one.set_f(one.f() >> 1);
+ one.set_e(one.e() + 1);
+ // Integer division by one.
+ int digit = static_cast<int>(fractionals >> -one.e());
+ buffer[*length] = '0' + digit;
+ (*length)++;
+ fractionals &= one.f() - 1; // Modulo by one.
+ (*kappa)--;
+ if (fractionals < unsafe_interval.f()) {
+ return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f() * unit,
+ unsafe_interval.f(), fractionals, one.f(), unit);
+ }
+ }
+}
+
+
+// Rounds the given generated digits in the buffer and weeds out generated
+// digits that are not in the safe interval, or where we cannot find a rounded
+// representation.
+// Input: * buffer containing the digits of too_high / 10^kappa
+// * the buffer's length
+// * distance_too_high_w == (too_high - w).f() * unit
+// * unsafe_interval == (too_high - too_low).f() * unit
+// * rest = (too_high - buffer * 10^kappa).f() * unit
+// * ten_kappa = 10^kappa * unit
+// * unit = the common multiplier
+// Output: returns true on success.
+// Modifies the generated digits in the buffer to approach (round towards) w.
+template <int alpha, int gamma>
+bool Grisu3<alpha, gamma>::RoundWeed(char* buffer,
+ int length,
+ uint64_t distance_too_high_w,
+ uint64_t unsafe_interval,
+ uint64_t rest,
+ uint64_t ten_kappa,
+ uint64_t unit) {
+ uint64_t small_distance = distance_too_high_w - unit;
+ uint64_t big_distance = distance_too_high_w + unit;
+ // Let w- = too_high - big_distance, and
+ // w+ = too_high - small_distance.
+ // Note: w- < w < w+
+ //
+ // The real w (* unit) must lie somewhere inside the interval
+ // ]w-; w+[ (often written as "(w-; w+)")
+
+ // Basically the buffer currently contains a number in the unsafe interval
+ // ]too_low; too_high[ with too_low < w < too_high
+ //
+ // By generating the digits of too_high we got the biggest last digit.
+ // In the case that w+ < buffer < too_high we try to decrement the buffer.
+ // This way the buffer approaches (rounds towards) w.
+ // There are 3 conditions that stop the decrementation process:
+ // 1) the buffer is already below w+
+ // 2) decrementing the buffer would make it leave the unsafe interval
+ // 3) decrementing the buffer would yield a number below w+ and farther away
+ // than the current number. In other words:
+ // (buffer{-1} < w+) && w+ - buffer{-1} > buffer - w+
+ // Instead of using the buffer directly we use its distance to too_high.
+ // Conceptually rest ~= too_high - buffer
+ while (rest < small_distance && // Negated condition 1
+ unsafe_interval - rest >= ten_kappa && // Negated condition 2
+ (rest + ten_kappa < small_distance || // buffer{-1} > w+
+ small_distance - rest >= rest + ten_kappa - small_distance)) {
+ buffer[length - 1]--;
+ rest += ten_kappa;
+ }
+
+ // We have approached w+ as much as possible. We now test if approaching w-
+ // would require changing the buffer. If yes, then we have two possible
+ // representations close to w, but we cannot decide which one is closer.
+ if (rest < big_distance &&
+ unsafe_interval - rest >= ten_kappa &&
+ (rest + ten_kappa < big_distance ||
+ big_distance - rest > rest + ten_kappa - big_distance)) {
+ return false;
+ }
+
+ // Weeding test.
+ // The safe interval is [too_low + 2 ulp; too_high - 2 ulp]
+ // Since too_low = too_high - unsafe_interval this is equivalent too
+ // [too_high - unsafe_interval + 4 ulp; too_high - 2 ulp]
+ // Conceptually we have: rest ~= too_high - buffer
+ return (2 * unit <= rest) && (rest <= unsafe_interval - 4 * unit);
+}
+
+
+bool grisu3(double v, char* buffer, int* sign, int* length, int* point) {
+ ASSERT(v != 0);
+ ASSERT(!Double(v).IsSpecial());
+
+ if (v < 0) {
+ v = -v;
+ *sign = 1;
+ } else {
+ *sign = 0;
+ }
+ int decimal_exponent;
+ bool result = Grisu3<-60, -32>::grisu3(v, buffer, length, &decimal_exponent);
+ *point = *length + decimal_exponent;
+ buffer[*length] = '\0';
+ return result;
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/grisu3.h b/deps/v8/src/grisu3.h
new file mode 100644
index 0000000000..b41ca379f4
--- /dev/null
+++ b/deps/v8/src/grisu3.h
@@ -0,0 +1,55 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_GRISU3_H_
+#define V8_GRISU3_H_
+
+namespace v8 {
+namespace internal {
+
+// Grisu3 will produce at most kGrisu3MaximalLength digits. This does not
+// include the terminating '\0' character.
+static const int kGrisu3MaximalLength = 17;
+
+// Provides a decimal representation of v.
+// v must satisfy v != 0 and it must not be Infinity or NaN.
+// Returns true if it succeeds, otherwise the result can not be trusted.
+// There will be *length digits inside the buffer followed by a null terminator.
+// If the function returns true then
+// v == (double) (buffer * 10^(decimal-point - length)).
+// The digits in the buffer are the shortest representation possible: no
+// 0.099999999999 instead of 0.1.
+// The last digit will be closest to the actual v. That is, even if several
+// digits might correctly yield 'v' when read again, the buffer will contain the
+// one closest to v.
+// The variable 'sign' will be '0' if the given number is positive, and '1'
+// otherwise.
+bool grisu3(double d, char* buffer, int* sign, int* length, int* decimal_point);
+
+} } // namespace v8::internal
+
+#endif // V8_GRISU3_H_
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index fbe04640db..d666875422 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -2571,11 +2571,9 @@ Object* Heap::CopyJSObject(JSObject* source) {
reinterpret_cast<Object**>(source->address()),
object_size);
// Update write barrier for all fields that lie beyond the header.
- for (int offset = JSObject::kHeaderSize;
- offset < object_size;
- offset += kPointerSize) {
- RecordWrite(clone_address, offset);
- }
+ RecordWrites(clone_address,
+ JSObject::kHeaderSize,
+ object_size - JSObject::kHeaderSize);
} else {
clone = new_space_.AllocateRaw(object_size);
if (clone->IsFailure()) return clone;
@@ -2906,12 +2904,9 @@ Object* Heap::AllocateFixedArray(int length) {
reinterpret_cast(result)->set_map(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
- Object* value = undefined_value();
// Initialize body.
- for (int index = 0; index < length; index++) {
- ASSERT(!Heap::InNewSpace(value)); // value = undefined
- array->set(index, value, SKIP_WRITE_BARRIER);
- }
+ ASSERT(!Heap::InNewSpace(undefined_value()));
+ MemsetPointer(array->data_start(), undefined_value(), length);
}
return result;
}
@@ -2963,11 +2958,8 @@ Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
reinterpret_cast(result)->set_map(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
- Object* value = undefined_value();
- for (int index = 0; index < length; index++) {
- ASSERT(!Heap::InNewSpace(value)); // value = undefined
- array->set(index, value, SKIP_WRITE_BARRIER);
- }
+ ASSERT(!Heap::InNewSpace(undefined_value()));
+ MemsetPointer(array->data_start(), undefined_value(), length);
return array;
}
@@ -2994,9 +2986,7 @@ Object* Heap::AllocateFixedArrayWithHoles(int length) {
array->set_length(length);
// Initialize body.
ASSERT(!Heap::InNewSpace(the_hole_value()));
- MemsetPointer(HeapObject::RawField(array, FixedArray::kHeaderSize),
- the_hole_value(),
- length);
+ MemsetPointer(array->data_start(), the_hole_value(), length);
}
return result;
}
@@ -3409,7 +3399,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize("strong_root_list");
- v->VisitPointer(bit_cast<Object**>(&hidden_symbol_));
+ v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
v->Synchronize("symbol");
Bootstrapper::Iterate(v);
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index f13556bd7a..7f0d5d4385 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -2148,6 +2148,17 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::movmskpd(Register dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x50);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::movdqa(const Operand& dst, XMMRegister src ) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2283,6 +2294,11 @@ void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
}
+void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
+ EMIT(0xC0 | dst.code() << 3 | src.code());
+}
+
+
void Assembler::Print() {
Disassembler::Decode(stdout, buffer_, pc_);
}
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 4497e2aa70..d8cd57a2a7 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -769,6 +769,7 @@ class Assembler : public Malloced {
void comisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
+ void movmskpd(Register dst, XMMRegister src);
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
@@ -828,7 +829,7 @@ class Assembler : public Malloced {
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
-
+ void emit_sse_operand(Register dst, XMMRegister src);
private:
byte* addr_at(int pos) { return buffer_ + pos; }
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index bdd1c4a3f4..703fffe07e 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -2286,61 +2286,69 @@ void CodeGenerator::Comparison(AstNode* node,
// a jump target and branching to duplicate the virtual frame at
// the first split. We manually handle the off-frame references
// by reconstituting them on the non-fall-through path.
- JumpTarget is_smi;
- __ test(left_side.reg(), Immediate(kSmiTagMask));
- is_smi.Branch(zero, taken);
- bool is_for_loop_compare = (node->AsCompareOperation() != NULL)
- && node->AsCompareOperation()->is_for_loop_condition();
- if (!is_for_loop_compare
- && CpuFeatures::IsSupported(SSE2)
- && right_val->IsSmi()) {
- // Right side is a constant smi and left side has been checked
- // not to be a smi.
- CpuFeatures::Scope use_sse2(SSE2);
- JumpTarget not_number;
- __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- not_number.Branch(not_equal, &left_side);
- __ movdbl(xmm1,
- FieldOperand(left_reg, HeapNumber::kValueOffset));
- int value = Smi::cast(*right_val)->value();
- if (value == 0) {
- __ xorpd(xmm0, xmm0);
- } else {
- Result temp = allocator()->Allocate();
- __ mov(temp.reg(), Immediate(value));
- __ cvtsi2sd(xmm0, Operand(temp.reg()));
- temp.Unuse();
+ if (left_side.is_smi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(left_side.reg(), "Argument not a smi");
}
- __ comisd(xmm1, xmm0);
- // Jump to builtin for NaN.
- not_number.Branch(parity_even, &left_side);
- left_side.Unuse();
- Condition double_cc = cc;
- switch (cc) {
- case less: double_cc = below; break;
- case equal: double_cc = equal; break;
- case less_equal: double_cc = below_equal; break;
- case greater: double_cc = above; break;
- case greater_equal: double_cc = above_equal; break;
- default: UNREACHABLE();
+ } else {
+ JumpTarget is_smi;
+ __ test(left_side.reg(), Immediate(kSmiTagMask));
+ is_smi.Branch(zero, taken);
+
+ bool is_for_loop_compare = (node->AsCompareOperation() != NULL)
+ && node->AsCompareOperation()->is_for_loop_condition();
+ if (!is_for_loop_compare
+ && CpuFeatures::IsSupported(SSE2)
+ && right_val->IsSmi()) {
+ // Right side is a constant smi and left side has been checked
+ // not to be a smi.
+ CpuFeatures::Scope use_sse2(SSE2);
+ JumpTarget not_number;
+ __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ not_number.Branch(not_equal, &left_side);
+ __ movdbl(xmm1,
+ FieldOperand(left_reg, HeapNumber::kValueOffset));
+ int value = Smi::cast(*right_val)->value();
+ if (value == 0) {
+ __ xorpd(xmm0, xmm0);
+ } else {
+ Result temp = allocator()->Allocate();
+ __ mov(temp.reg(), Immediate(value));
+ __ cvtsi2sd(xmm0, Operand(temp.reg()));
+ temp.Unuse();
+ }
+ __ comisd(xmm1, xmm0);
+ // Jump to builtin for NaN.
+ not_number.Branch(parity_even, &left_side);
+ left_side.Unuse();
+ Condition double_cc = cc;
+ switch (cc) {
+ case less: double_cc = below; break;
+ case equal: double_cc = equal; break;
+ case less_equal: double_cc = below_equal; break;
+ case greater: double_cc = above; break;
+ case greater_equal: double_cc = above_equal; break;
+ default: UNREACHABLE();
+ }
+ dest->true_target()->Branch(double_cc);
+ dest->false_target()->Jump();
+ not_number.Bind(&left_side);
}
- dest->true_target()->Branch(double_cc);
+
+ // Setup and call the compare stub.
+ CompareStub stub(cc, strict, kCantBothBeNaN);
+ Result result = frame_->CallStub(&stub, &left_side, &right_side);
+ result.ToRegister();
+ __ cmp(result.reg(), 0);
+ result.Unuse();
+ dest->true_target()->Branch(cc);
dest->false_target()->Jump();
- not_number.Bind(&left_side);
- }
- // Setup and call the compare stub.
- CompareStub stub(cc, strict, kCantBothBeNaN);
- Result result = frame_->CallStub(&stub, &left_side, &right_side);
- result.ToRegister();
- __ cmp(result.reg(), 0);
- result.Unuse();
- dest->true_target()->Branch(cc);
- dest->false_target()->Jump();
+ is_smi.Bind();
+ }
- is_smi.Bind();
left_side = Result(left_reg);
right_side = Result(right_val);
// Test smi equality and comparison by signed int comparison.
@@ -3579,6 +3587,24 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
}
CheckStack(); // TODO(1222600): ignore if body contains calls.
+
+ // If we have (a) a loop with a compile-time constant trip count
+ // and (b) the loop induction variable is not assignend inside the
+ // loop we update the number type of the induction variable to be smi.
+
+ if (node->is_fast_smi_loop()) {
+ // Set number type of the loop variable to smi.
+ Slot* slot = node->loop_variable()->slot();
+ ASSERT(slot->type() == Slot::LOCAL);
+ frame_->SetTypeForLocalAt(slot->index(), NumberInfo::Smi());
+ if (FLAG_debug_code) {
+ frame_->PushLocalAt(slot->index());
+ Result var = frame_->Pop();
+ var.ToRegister();
+ __ AbortIfNotSmi(var.reg(), "Loop variable not a smi.");
+ }
+ }
+
Visit(node->body());
// If there is an update expression, compile it if necessary.
@@ -4754,11 +4780,13 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
frame_->Push(Smi::FromInt(node->literal_index()));
// Constant properties.
frame_->Push(node->constant_properties());
+ // Should the object literal have fast elements?
+ frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
Result clone;
if (node->depth() > 1) {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
- clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+ clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
}
frame_->Push(&clone);
@@ -5912,7 +5940,7 @@ void CodeGenerator::GenerateSetValueOf(ZoneList* args) {
}
-void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
// ArgumentsAccessStub expects the key in edx and the formal
@@ -6624,15 +6652,6 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
__ Set(tmp.reg(), Immediate(0));
}
- DeferredCode* deferred = NULL;
- if (is_postfix) {
- deferred = new DeferredPostfixCountOperation(new_value.reg(),
- old_value.reg(),
- is_increment);
- } else {
- deferred = new DeferredPrefixCountOperation(new_value.reg(),
- is_increment);
- }
if (is_increment) {
__ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
@@ -6640,24 +6659,41 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
__ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
}
- // If the count operation didn't overflow and the result is a valid
- // smi, we're done. Otherwise, we jump to the deferred slow-case
- // code.
- if (tmp.is_valid()) {
- // We combine the overflow and the smi tag check if we could
- // successfully allocate a temporary byte register.
- __ setcc(overflow, tmp.reg());
- __ or_(Operand(tmp.reg()), new_value.reg());
- __ test(tmp.reg(), Immediate(kSmiTagMask));
- tmp.Unuse();
- deferred->Branch(not_zero);
+ if (new_value.is_smi()) {
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(new_value.reg(), "Argument not a smi");
+ }
+ if (tmp.is_valid()) tmp.Unuse();
} else {
- // Otherwise we test separately for overflow and smi tag.
- deferred->Branch(overflow);
- __ test(new_value.reg(), Immediate(kSmiTagMask));
- deferred->Branch(not_zero);
+ DeferredCode* deferred = NULL;
+ if (is_postfix) {
+ deferred = new DeferredPostfixCountOperation(new_value.reg(),
+ old_value.reg(),
+ is_increment);
+ } else {
+ deferred = new DeferredPrefixCountOperation(new_value.reg(),
+ is_increment);
+ }
+
+ // If the count operation didn't overflow and the result is a valid
+ // smi, we're done. Otherwise, we jump to the deferred slow-case
+ // code.
+ if (tmp.is_valid()) {
+ // We combine the overflow and the smi tag check if we could
+ // successfully allocate a temporary byte register.
+ __ setcc(overflow, tmp.reg());
+ __ or_(Operand(tmp.reg()), new_value.reg());
+ __ test(tmp.reg(), Immediate(kSmiTagMask));
+ tmp.Unuse();
+ deferred->Branch(not_zero);
+ } else {
+ // Otherwise we test separately for overflow and smi tag.
+ deferred->Branch(overflow);
+ __ test(new_value.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
+ deferred->BindExit();
}
- deferred->BindExit();
// Postfix: store the old value in the allocated slot under the
// reference.
@@ -6823,8 +6859,15 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
overwrite_mode = OVERWRITE_RIGHT;
}
- Load(node->left());
- Load(node->right());
+ if (node->left()->IsTrivial()) {
+ Load(node->right());
+ Result right = frame_->Pop();
+ frame_->Push(node->left());
+ frame_->Push(&right);
+ } else {
+ Load(node->left());
+ Load(node->right());
+ }
GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
}
}
@@ -7024,8 +7067,20 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
default:
UNREACHABLE();
}
- if (!left_already_loaded) Load(left);
- Load(right);
+
+ if (left->IsTrivial()) {
+ if (!left_already_loaded) {
+ Load(right);
+ Result right_result = frame_->Pop();
+ frame_->Push(left);
+ frame_->Push(&right_result);
+ } else {
+ Load(right);
+ }
+ } else {
+ if (!left_already_loaded) Load(left);
+ Load(right);
+ }
Comparison(node, cc, strict, destination());
}
@@ -10162,6 +10217,12 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ masm->RecordWriteHelper(object_, addr_, scratch_);
+ masm->ret(0);
+}
+
+
void CompareStub::Generate(MacroAssembler* masm) {
Label call_builtin, done;
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 79cad72685..f7ec0b580e 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -339,6 +339,10 @@ class CodeGenerator: public AstVisitor {
bool in_spilled_code() const { return in_spilled_code_; }
void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
+ // If the name is an inline runtime function call return the number of
+ // expected arguments. Otherwise return -1.
+ static int InlineRuntimeCallArgumentsCount(Handle<String> name);
+
private:
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@@ -522,6 +526,7 @@ class CodeGenerator: public AstVisitor {
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
+ int nargs;
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle name);
@@ -555,7 +560,7 @@ class CodeGenerator: public AstVisitor {
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList* args);
- void GenerateArgumentsAccess(ZoneList* args);
+ void GenerateArguments(ZoneList* args);
// Support for accessing the class and value fields of an object.
void GenerateClassOf(ZoneList* args);
@@ -593,14 +598,10 @@ class CodeGenerator: public AstVisitor {
// Fast support for number to string.
void GenerateNumberToString(ZoneList* args);
- // Fast support for Math.pow().
+ // Fast call to math functions.
void GenerateMathPow(ZoneList* args);
-
- // Fast call to transcendental functions.
void GenerateMathSin(ZoneList* args);
void GenerateMathCos(ZoneList* args);
-
- // Fast case for sqrt
void GenerateMathSqrt(ZoneList* args);
// Simple condition analysis.
@@ -976,6 +977,42 @@ class NumberToStringStub: public CodeStub {
};
+class RecordWriteStub : public CodeStub {
+ public:
+ RecordWriteStub(Register object, Register addr, Register scratch)
+ : object_(object), addr_(addr), scratch_(scratch) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register object_;
+ Register addr_;
+ Register scratch_;
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+ object_.code(), addr_.code(), scratch_.code());
+ }
+#endif
+
+ // Minor key encoding in 12 bits of three registers (object, address and
+ // scratch) OOOOAAAASSSS.
+ class ScratchBits: public BitField<uint32_t, 0, 4> {};
+ class AddressBits: public BitField<uint32_t, 4, 4> {};
+ class ObjectBits: public BitField<uint32_t, 8, 4> {};
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ // Encode the registers.
+ return ObjectBits::encode(object_.code()) |
+ AddressBits::encode(addr_.code()) |
+ ScratchBits::encode(scratch_.code());
+ }
+};
+
+
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 0d85b10e7e..8d342e087c 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -1069,12 +1069,26 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector out_buffer,
} else {
UnimplementedInstruction();
}
- } else if (*data == 0x2F) {
+ } else if (*data == 0x2E || *data == 0x2F) {
+ const char* mnem = (*data == 0x2E) ? "ucomisd" : "comisd";
data++;
int mod, regop, rm;
get_modrm(*data, &mod, ®op, &rm);
- AppendToBuffer("comisd %s,%s",
- NameOfXMMRegister(regop),
+ if (mod == 0x3) {
+ AppendToBuffer("%s %s,%s", mnem,
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ }
+ } else if (*data == 0x50) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, ®op, &rm);
+ AppendToBuffer("movmskpd %s,%s",
+ NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x57) {
@@ -1198,6 +1212,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector out_buffer,
const char* mnem = "?";
switch (b2) {
case 0x2A: mnem = "cvtsi2sd"; break;
+ case 0x2C: mnem = "cvttsd2si"; break;
case 0x51: mnem = "sqrtsd"; break;
case 0x58: mnem = "addsd"; break;
case 0x59: mnem = "mulsd"; break;
@@ -1208,14 +1223,38 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, ®op, &rm);
if (b2 == 0x2A) {
- AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
- data += PrintRightOperand(data);
+ if (mod != 0x3) {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else {
+ AppendToBuffer("%s %s,%s",
+ mnem,
+ NameOfXMMRegister(regop),
+ NameOfCPURegister(rm));
+ data++;
+ }
+ } else if (b2 == 0x2C) {
+ if (mod != 0x3) {
+ AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
+ data += PrintRightOperand(data);
+ } else {
+ AppendToBuffer("%s %s,%s",
+ mnem,
+ NameOfCPURegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ }
} else {
- AppendToBuffer("%s %s,%s",
- mnem,
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
+ if (mod != 0x3) {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else {
+ AppendToBuffer("%s %s,%s",
+ mnem,
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ }
}
}
} else {
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 6e3ae105d1..cedf9c95fd 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -901,10 +901,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(expr->constant_properties()));
+ __ push(Immediate(Smi::FromInt(expr->fast_elements() ? 1 : 0)));
if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
- __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
+ __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
}
// If result_saved is true the result is on top of the stack. If
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 555cd1bf9b..3928661e9e 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -152,6 +152,108 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
}
+static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register key,
+ Register r0,
+ Register r1,
+ Register r2) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver and is unchanged.
+ //
+ // key - holds the smi key on entry and is unchanged if a branch is
+ // performed to the miss label. If the load succeeds and we
+ // fall through, key holds the result on exit.
+ //
+ // Scratch registers:
+ //
+ // r0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // r1 - used to hold the capacity mask of the dictionary
+ //
+ // r2 - used for the index into the dictionary.
+ Label done;
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ __ mov(r1, r0);
+ __ not_(r0);
+ __ shl(r1, 15);
+ __ add(r0, Operand(r1));
+ // hash = hash ^ (hash >> 12);
+ __ mov(r1, r0);
+ __ shr(r1, 12);
+ __ xor_(r0, Operand(r1));
+ // hash = hash + (hash << 2);
+ __ lea(r0, Operand(r0, r0, times_4, 0));
+ // hash = hash ^ (hash >> 4);
+ __ mov(r1, r0);
+ __ shr(r1, 4);
+ __ xor_(r0, Operand(r1));
+ // hash = hash * 2057;
+ __ imul(r0, r0, 2057);
+ // hash = hash ^ (hash >> 16);
+ __ mov(r1, r0);
+ __ shr(r1, 16);
+ __ xor_(r0, Operand(r1));
+
+ // Compute capacity mask.
+ const int kCapacityOffset =
+ NumberDictionary::kHeaderSize +
+ NumberDictionary::kCapacityIndex * kPointerSize;
+ __ mov(r1, FieldOperand(elements, kCapacityOffset));
+ __ shr(r1, kSmiTagSize); // convert smi to int
+ __ dec(r1);
+
+ const int kElementsStartOffset =
+ NumberDictionary::kHeaderSize +
+ NumberDictionary::kElementsStartIndex * kPointerSize;
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Use r2 for index calculations and keep the hash intact in r0.
+ __ mov(r2, r0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ __ add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
+ }
+ __ and_(r2, Operand(r1));
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(NumberDictionary::kEntrySize == 3);
+ __ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
+
+ // Check if the key matches.
+ __ cmp(key, FieldOperand(elements,
+ r2,
+ times_pointer_size,
+ kElementsStartOffset));
+ if (i != (kProbes - 1)) {
+ __ j(equal, &done, taken);
+ } else {
+ __ j(not_equal, miss, not_taken);
+ }
+ }
+
+ __ bind(&done);
+ // Check that the value is a normal property.
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ ASSERT_EQ(NORMAL, 0);
+ __ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
+ Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ __ j(not_zero, miss);
+
+ // Get the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ mov(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+}
+
+
// Helper function used to check that a value is either not an object
// or is loaded if it is an object.
static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
@@ -225,6 +327,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -----------------------------------
Label slow, check_string, index_int, index_string;
Label check_pixel_array, probe_dictionary;
+ Label check_number_dictionary;
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
@@ -273,7 +376,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ebx: untagged index
// eax: key
// ecx: elements
- __ CheckMap(ecx, Factory::pixel_array_map(), &slow, true);
+ __ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
__ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ mov(eax, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
@@ -281,6 +384,32 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ SmiTag(eax);
__ ret(0);
+ __ bind(&check_number_dictionary);
+ // Check whether the elements is a number dictionary.
+ // edx: receiver
+ // ebx: untagged index
+ // eax: key
+ // ecx: elements
+ __ CheckMap(ecx, Factory::hash_table_map(), &slow, true);
+ Label slow_pop_receiver;
+ // Push receiver on the stack to free up a register for the dictionary
+ // probing.
+ __ push(edx);
+ GenerateNumberDictionaryLoad(masm,
+ &slow_pop_receiver,
+ ecx,
+ eax,
+ ebx,
+ edx,
+ edi);
+ // Pop receiver before returning.
+ __ pop(edx);
+ __ ret(0);
+
+ __ bind(&slow_pop_receiver);
+ // Pop the receiver from the stack and jump to runtime.
+ __ pop(edx);
+
__ bind(&slow);
// Slow case: jump to runtime.
// edx: receiver
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 45e24fa737..703ca793aa 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -47,33 +47,32 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
}
-static void RecordWriteHelper(MacroAssembler* masm,
- Register object,
- Register addr,
- Register scratch) {
+void MacroAssembler::RecordWriteHelper(Register object,
+ Register addr,
+ Register scratch) {
Label fast;
// Compute the page start address from the heap object pointer, and reuse
// the 'object' register for it.
- masm->and_(object, ~Page::kPageAlignmentMask);
+ and_(object, ~Page::kPageAlignmentMask);
Register page_start = object;
// Compute the bit addr in the remembered set/index of the pointer in the
// page. Reuse 'addr' as pointer_offset.
- masm->sub(addr, Operand(page_start));
- masm->shr(addr, kObjectAlignmentBits);
+ sub(addr, Operand(page_start));
+ shr(addr, kObjectAlignmentBits);
Register pointer_offset = addr;
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
- masm->cmp(pointer_offset, Page::kPageSize / kPointerSize);
- masm->j(less, &fast);
+ cmp(pointer_offset, Page::kPageSize / kPointerSize);
+ j(less, &fast);
// Adjust 'page_start' so that addressing using 'pointer_offset' hits the
// extra remembered set after the large object.
// Find the length of the large object (FixedArray).
- masm->mov(scratch, Operand(page_start, Page::kObjectStartOffset
+ mov(scratch, Operand(page_start, Page::kObjectStartOffset
+ FixedArray::kLengthOffset));
Register array_length = scratch;
@@ -83,59 +82,40 @@ static void RecordWriteHelper(MacroAssembler* masm,
// Add the delta between the end of the normal RSet and the start of the
// extra RSet to 'page_start', so that addressing the bit using
// 'pointer_offset' hits the extra RSet words.
- masm->lea(page_start,
- Operand(page_start, array_length, times_pointer_size,
- Page::kObjectStartOffset + FixedArray::kHeaderSize
- - Page::kRSetEndOffset));
+ lea(page_start,
+ Operand(page_start, array_length, times_pointer_size,
+ Page::kObjectStartOffset + FixedArray::kHeaderSize
+ - Page::kRSetEndOffset));
// NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
- masm->bind(&fast);
- masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
+ bind(&fast);
+ bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
}
-class RecordWriteStub : public CodeStub {
- public:
- RecordWriteStub(Register object, Register addr, Register scratch)
- : object_(object), addr_(addr), scratch_(scratch) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register object_;
- Register addr_;
- Register scratch_;
-
-#ifdef DEBUG
- void Print() {
- PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
- object_.code(), addr_.code(), scratch_.code());
- }
-#endif
-
- // Minor key encoding in 12 bits of three registers (object, address and
- // scratch) OOOOAAAASSSS.
- class ScratchBits: public BitField {};
- class AddressBits: public BitField {};
- class ObjectBits: public BitField {};
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- // Encode the registers.
- return ObjectBits::encode(object_.code()) |
- AddressBits::encode(addr_.code()) |
- ScratchBits::encode(scratch_.code());
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch) {
+ if (Serializer::enabled()) {
+ // Can't do arithmetic on external references if it might get serialized.
+ mov(scratch, Operand(object));
+ // The mask isn't really an address. We load it as an external reference in
+ // case the size of the new space is different between the snapshot maker
+ // and the running system.
+ and_(Operand(scratch), Immediate(ExternalReference::new_space_mask()));
+ cmp(Operand(scratch), Immediate(ExternalReference::new_space_start()));
+ j(cc, branch);
+ } else {
+ int32_t new_space_start = reinterpret_cast(
+ ExternalReference::new_space_start().address());
+ lea(scratch, Operand(object, -new_space_start));
+ and_(scratch, Heap::NewSpaceMask());
+ j(cc, branch);
}
-};
-
-
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- RecordWriteHelper(masm, object_, addr_, scratch_);
- masm->ret(0);
}
@@ -161,22 +141,7 @@ void MacroAssembler::RecordWrite(Register object, int offset,
test(value, Immediate(kSmiTagMask));
j(zero, &done);
- if (Serializer::enabled()) {
- // Can't do arithmetic on external references if it might get serialized.
- mov(value, Operand(object));
- // The mask isn't really an address. We load it as an external reference in
- // case the size of the new space is different between the snapshot maker
- // and the running system.
- and_(Operand(value), Immediate(ExternalReference::new_space_mask()));
- cmp(Operand(value), Immediate(ExternalReference::new_space_start()));
- j(equal, &done);
- } else {
- int32_t new_space_start = reinterpret_cast(
- ExternalReference::new_space_start().address());
- lea(value, Operand(object, -new_space_start));
- and_(value, Heap::NewSpaceMask());
- j(equal, &done);
- }
+ InNewSpace(object, value, equal, &done);
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
// Compute the bit offset in the remembered set, leave it in 'value'.
@@ -209,7 +174,7 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// If we are already generating a shared stub, not inlining the
// record write code isn't going to save us any memory.
if (generating_stub()) {
- RecordWriteHelper(this, object, dst, value);
+ RecordWriteHelper(object, dst, value);
} else {
RecordWriteStub stub(object, dst, value);
CallStub(&stub);
@@ -221,9 +186,9 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (FLAG_debug_code) {
- mov(object, Immediate(bit_cast(kZapValue)));
- mov(value, Immediate(bit_cast(kZapValue)));
- mov(scratch, Immediate(bit_cast(kZapValue)));
+ mov(object, Immediate(BitCast(kZapValue)));
+ mov(value, Immediate(BitCast(kZapValue)));
+ mov(scratch, Immediate(BitCast(kZapValue)));
}
}
@@ -397,6 +362,12 @@ void MacroAssembler::AbortIfNotNumber(Register object, const char* msg) {
}
+void MacroAssembler::AbortIfNotSmi(Register object, const char* msg) {
+ test(object, Immediate(kSmiTagMask));
+ Assert(equal, msg);
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, Operand(esp));
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index a284b63676..00243d76fd 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -48,6 +48,18 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
+
+ void RecordWriteHelper(Register object,
+ Register addr,
+ Register scratch);
+
+ // Check if object is in new space.
+ // scratch can be object itself, but it will be clobbered.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc, // equal for new space, not_equal otherwise.
+ Label* branch);
+
// Set the remembered set bit for [object+offset].
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
@@ -179,6 +191,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object, const char* msg);
+ // Abort execution if argument is not a smi. Used in debug code.
+ void AbortIfNotSmi(Register object, const char* msg);
+
// ---------------------------------------------------------------------------
// Exception handling
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index a7e9a69a04..902a5b8106 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -549,9 +549,8 @@ class CallOptimization BASE_EMBEDDED {
// fast api call builtin.
void AnalyzePossibleApiFunction(JSFunction* function) {
SharedFunctionInfo* sfi = function->shared();
- if (sfi->function_data()->IsUndefined()) return;
- FunctionTemplateInfo* info =
- FunctionTemplateInfo::cast(sfi->function_data());
+ if (!sfi->IsApiFunction()) return;
+ FunctionTemplateInfo* info = sfi->get_api_func_data();
// Require a C++ callback.
if (info->call_code()->IsUndefined()) return;
@@ -698,8 +697,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
CallOptimization optimization(lookup);
- if (optimization.is_constant_call() &&
- !Top::CanHaveSpecialFunctions(holder)) {
+ if (optimization.is_constant_call()) {
CompileCacheable(masm,
object,
receiver,
@@ -1211,6 +1209,195 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,
}
+Object* CallStubCompiler::CompileArrayPushCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+ ASSERT(check == RECEIVER_MAP_CHECK);
+
+ Label miss;
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ CheckPrototypes(JSObject::cast(object), edx,
+ holder, ebx,
+ eax, name, &miss);
+
+ if (argc == 0) {
+ // Noop, return the length.
+ __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
+ __ ret((argc + 1) * kPointerSize);
+ } else {
+ // Get the elements array of the object.
+ __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
+
+ // Check that the elements are in fast mode (not dictionary).
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ __ j(not_equal, &miss);
+
+ if (argc == 1) { // Otherwise fall through to call builtin.
+ Label call_builtin, exit, with_rset_update;
+
+ // Get the array's length into eax and calculate new length.
+ __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ add(Operand(eax), Immediate(argc << 1));
+
+ // Get the element's length into ecx.
+ __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ SmiTag(ecx);
+
+ // Check if we could survive without allocation, go to builtin otherwise.
+ __ cmp(eax, Operand(ecx));
+ __ j(greater, &call_builtin);
+
+ // Save new length.
+ __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
+
+ // Push the element.
+ __ lea(edx, FieldOperand(ebx,
+ eax, times_half_pointer_size,
+ FixedArray::kHeaderSize - argc * kPointerSize));
+ __ mov(ecx, Operand(esp, argc * kPointerSize));
+ __ mov(Operand(edx, 0), ecx);
+
+ // Check if value is a smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &with_rset_update);
+
+ __ bind(&exit);
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&with_rset_update);
+
+ __ InNewSpace(ebx, ecx, equal, &exit);
+
+ RecordWriteStub stub(ebx, edx, ecx);
+ __ CallStub(&stub);
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&call_builtin);
+ }
+
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
+ argc + 1,
+ 1);
+ }
+
+ __ bind(&miss);
+
+ Handle ic = ComputeCallMiss(arguments().immediate());
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ String* function_name = NULL;
+ if (function->shared()->name()->IsString()) {
+ function_name = String::cast(function->shared()->name());
+ }
+ return GetCode(CONSTANT_FUNCTION, function_name);
+}
+
+
+Object* CallStubCompiler::CompileArrayPopCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+ ASSERT(check == RECEIVER_MAP_CHECK);
+
+ Label miss, empty_array, call_builtin;
+
+ // Get the receiver from the stack.
+ const int argc = arguments().immediate();
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ CheckPrototypes(JSObject::cast(object), edx,
+ holder, ebx,
+ eax, name, &miss);
+
+ // Get the elements array of the object.
+ __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
+
+ // Check that the elements are in fast mode (not dictionary).
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ __ j(not_equal, &miss);
+
+ // Get the array's length into ecx and calculate new length.
+ __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
+ __ sub(Operand(ecx), Immediate(Smi::FromInt(1)));
+ __ j(negative, &empty_array);
+
+ // Get the last element.
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(eax, FieldOperand(ebx,
+ ecx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
+ __ j(equal, &call_builtin);
+
+ // Set the array's length.
+ __ mov(FieldOperand(edx, JSArray::kLengthOffset), ecx);
+
+ // Fill with the hole.
+ __ mov(FieldOperand(ebx,
+ ecx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ Immediate(Factory::the_hole_value()));
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&empty_array);
+ __ mov(eax, Immediate(Factory::undefined_value()));
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&call_builtin);
+
+ __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
+ argc + 1,
+ 1);
+
+ __ bind(&miss);
+
+ Handle ic = ComputeCallMiss(arguments().immediate());
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ String* function_name = NULL;
+ if (function->shared()->name()->IsString()) {
+ function_name = String::cast(function->shared()->name());
+ }
+ return GetCode(CONSTANT_FUNCTION, function_name);
+}
+
+
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
@@ -1223,6 +1410,14 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
+
+ SharedFunctionInfo* function_info = function->shared();
+ if (function_info->HasCustomCallGenerator()) {
+ CustomCallGenerator generator =
+ ToCData(function_info->function_data());
+ return generator(this, object, holder, function, name, check);
+ }
+
Label miss_in_smi_check;
// Get the receiver from the stack.
@@ -1333,18 +1528,6 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
break;
}
- case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
- CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, eax, name, &miss);
- // Make sure object->HasFastElements().
- // Get the elements array of the object.
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- // Check that the object is in fast mode (not dictionary).
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::fixed_array_map()));
- __ j(not_equal, &miss, not_taken);
- break;
-
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc
index 7b03a5b2f7..a749c78f8a 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.cc
+++ b/deps/v8/src/ia32/virtual-frame-ia32.cc
@@ -1171,11 +1171,17 @@ void VirtualFrame::Push(Expression* expr) {
}
VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL && proxy->is_this()) {
- PushParameterAt(-1);
- return;
+ if (proxy != NULL) {
+ Slot* slot = proxy->var()->slot();
+ if (slot->type() == Slot::LOCAL) {
+ PushLocalAt(slot->index());
+ return;
+ }
+ if (slot->type() == Slot::PARAMETER) {
+ PushParameterAt(slot->index());
+ return;
+ }
}
-
UNREACHABLE();
}
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h
index cd2d18f432..042f9b7a4a 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.h
+++ b/deps/v8/src/ia32/virtual-frame-ia32.h
@@ -422,6 +422,9 @@ class VirtualFrame: public ZoneObject {
// the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
inline void Nip(int num_dropped);
+ // Update the type information of a local variable frame element directly.
+ inline void SetTypeForLocalAt(int index, NumberInfo info);
+
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index f82e61e47b..2b97a8bf78 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -150,7 +150,7 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
// the receiver map's code cache. Therefore, if the current target
// is in the receiver map's code cache, the inline cache failed due
// to prototype check failure.
- int index = map->IndexInCodeCache(String::cast(name), target);
+ int index = map->IndexInCodeCache(name, target);
if (index >= 0) {
// For keyed load/store, the most likely cause of cache failure is
// that the key has changed. We do not distinguish between
@@ -458,17 +458,6 @@ Object* CallIC::LoadFunction(State state,
ASSERT(result != Heap::the_hole_value());
if (result->IsJSFunction()) {
- // Check if there is an optimized (builtin) version of the function.
- // Ignored this will degrade performance for some Array functions.
- // Please note we only return the optimized function iff
- // the JSObject has FastElements.
- if (object->IsJSObject() && JSObject::cast(*object)->HasFastElements()) {
- Object* opt = Top::LookupSpecialFunction(JSObject::cast(*object),
- lookup.holder(),
- JSFunction::cast(result));
- if (opt->IsJSFunction()) return opt;
- }
-
#ifdef ENABLE_DEBUGGER_SUPPORT
// Handle stepping into a function if step into is active.
if (Debug::StepInActive()) {
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index 722e0fc042..62f0ca62f4 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -196,6 +196,9 @@ int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
char* end_pos = dest_buf + actual_size - 1;
while (end_pos >= dest_buf && *end_pos != '\n') --end_pos;
actual_size = static_cast(end_pos - dest_buf + 1);
+ // If the assertion below is hit, it means that no line end was
+ // found --- something has gone wrong.
+ ASSERT(actual_size > 0);
ASSERT(actual_size <= max_size);
return actual_size;
}
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index b769e9046f..8889f1b77a 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -115,7 +115,7 @@ class Log : public AllStatic {
}
// Size of buffer used for formatting log messages.
- static const int kMessageBufferSize = 2048;
+ static const int kMessageBufferSize = v8::V8::kMinimumSizeForLogLinesBuffer;
private:
typedef int (*WritePtr)(const char* msg, int length);
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 588d345499..4441875507 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1346,10 +1346,9 @@ void Logger::LogCompiledFunctions() {
LOG(CodeCreateEvent(
Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
}
- } else if (shared->function_data()->IsFunctionTemplateInfo()) {
+ } else if (shared->IsApiFunction()) {
// API function.
- FunctionTemplateInfo* fun_data =
- FunctionTemplateInfo::cast(shared->function_data());
+ FunctionTemplateInfo* fun_data = shared->get_api_func_data();
Object* raw_call_data = fun_data->call_code();
if (!raw_call_data->IsUndefined()) {
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 9da2552479..122b057a93 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -136,9 +136,9 @@ macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
# a type error is thrown.
macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
macro DAY(time) = ($floor(time / 86400000));
-macro MONTH_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).month);
-macro DATE_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).date);
-macro YEAR_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).year);
+macro MONTH_FROM_TIME(time) = (MonthFromTime(time));
+macro DATE_FROM_TIME(time) = (DateFromTime(time));
+macro YEAR_FROM_TIME(time) = (YearFromTime(time));
macro HOUR_FROM_TIME(time) = (Modulo($floor(time / 3600000), 24));
macro MIN_FROM_TIME(time) = (Modulo($floor(time / 60000), 60));
macro SEC_FROM_TIME(time) = (Modulo($floor(time / 1000), 60));
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index 034f32d7fc..0623cb82dd 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -45,7 +45,8 @@ $Math.__proto__ = global.Object.prototype;
function MathAbs(x) {
if (%_IsSmi(x)) return x >= 0 ? x : -x;
if (!IS_NUMBER(x)) x = ToNumber(x);
- return %Math_abs(x);
+ if (x === 0) return 0; // To handle -0.
+ return x > 0 ? x : -x;
}
// ECMA 262 - 15.8.2.2
@@ -84,7 +85,7 @@ function MathCeil(x) {
// ECMA 262 - 15.8.2.7
function MathCos(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
- return %_Math_cos(x);
+ return %_MathCos(x);
}
// ECMA 262 - 15.8.2.8
@@ -159,7 +160,7 @@ function MathMin(arg1, arg2) { // length == 2
function MathPow(x, y) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(y)) y = ToNumber(y);
- return %_Math_pow(x, y);
+ return %_MathPow(x, y);
}
// ECMA 262 - 15.8.2.14
@@ -176,13 +177,13 @@ function MathRound(x) {
// ECMA 262 - 15.8.2.16
function MathSin(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
- return %_Math_sin(x);
+ return %_MathSin(x);
}
// ECMA 262 - 15.8.2.17
function MathSqrt(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
- return %_Math_sqrt(x);
+ return %_MathSqrt(x);
}
// ECMA 262 - 15.8.2.18
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 5848115059..cb392ff90f 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -182,7 +182,8 @@ function FormatMessage(message) {
invalid_json: "String '%0' is not valid JSON",
circular_structure: "Converting circular structure to JSON",
obj_ctor_property_non_object: "Object.%0 called on non-object",
- array_indexof_not_defined: "Array.getIndexOf: Argument undefined"
+ array_indexof_not_defined: "Array.getIndexOf: Argument undefined",
+ illegal_access: "illegal access"
};
}
var format = kMessages[message.type];
diff --git a/deps/v8/src/mips/codegen-mips-inl.h b/deps/v8/src/mips/codegen-mips-inl.h
index 2a77715a3b..904dd74e9d 100644
--- a/deps/v8/src/mips/codegen-mips-inl.h
+++ b/deps/v8/src/mips/codegen-mips-inl.h
@@ -38,16 +38,6 @@ namespace internal {
void DeferredCode::Jump() { __ b(&entry_label_); }
-void CodeGenerator::GenerateMathSin(ZoneList* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index facccc288d..7b32180efb 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -31,6 +31,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
+#include "ic-inl.h"
#include "parser.h"
#include "register-allocator-inl.h"
#include "runtime.h"
@@ -297,6 +298,16 @@ void CodeGenerator::GenerateMathPow(ZoneList* args) {
}
+void CodeGenerator::GenerateMathCos(ZoneList