Update v8 to 3.9.9

v0.9.1-release
isaacs, 13 years ago
parent commit f4641bd4de
100 changed files (changed lines in parentheses):

  1. deps/v8/.gitignore (8)
  2. deps/v8/AUTHORS (4)
  3. deps/v8/ChangeLog (55)
  4. deps/v8/SConstruct (5)
  5. deps/v8/benchmarks/base.js (4)
  6. deps/v8/benchmarks/navier-stokes.js (387)
  7. deps/v8/benchmarks/run.html (2)
  8. deps/v8/benchmarks/run.js (1)
  9. deps/v8/build/common.gypi (6)
 10. deps/v8/build/standalone.gypi (3)
 11. deps/v8/include/v8.h (2)
 12. deps/v8/preparser/preparser-process.cc (2)
 13. deps/v8/src/api.cc (4)
 14. deps/v8/src/apinatives.js (4)
 15. deps/v8/src/arm/builtins-arm.cc (35)
 16. deps/v8/src/arm/code-stubs-arm.cc (44)
 17. deps/v8/src/arm/codegen-arm.cc (55)
 18. deps/v8/src/arm/full-codegen-arm.cc (69)
 19. deps/v8/src/arm/lithium-arm.cc (46)
 20. deps/v8/src/arm/lithium-arm.h (93)
 21. deps/v8/src/arm/lithium-codegen-arm.cc (161)
 22. deps/v8/src/arm/macro-assembler-arm.cc (66)
 23. deps/v8/src/arm/macro-assembler-arm.h (8)
 24. deps/v8/src/arm/simulator-arm.cc (4)
 25. deps/v8/src/arm/stub-cache-arm.cc (186)
 26. deps/v8/src/array.js (62)
 27. deps/v8/src/ast.cc (22)
 28. deps/v8/src/ast.h (72)
 29. deps/v8/src/bootstrapper.cc (161)
 30. deps/v8/src/builtins.cc (12)
 31. deps/v8/src/char-predicates.h (2)
 32. deps/v8/src/code-stubs.cc (15)
 33. deps/v8/src/code-stubs.h (48)
 34. deps/v8/src/collection.js (7)
 35. deps/v8/src/compiler.cc (36)
 36. deps/v8/src/compiler.h (12)
 37. deps/v8/src/contexts.h (4)
 38. deps/v8/src/d8.js (24)
 39. deps/v8/src/data-flow.h (10)
 40. deps/v8/src/date.js (5)
 41. deps/v8/src/debug-debugger.js (6)
 42. deps/v8/src/deoptimizer.h (4)
 43. deps/v8/src/elements.cc (94)
 44. deps/v8/src/execution.cc (7)
 45. deps/v8/src/factory.cc (15)
 46. deps/v8/src/factory.h (2)
 47. deps/v8/src/flag-definitions.h (27)
 48. deps/v8/src/full-codegen.cc (22)
 49. deps/v8/src/full-codegen.h (46)
 50. deps/v8/src/globals.h (7)
 51. deps/v8/src/handles.cc (39)
 52. deps/v8/src/heap.cc (61)
 53. deps/v8/src/heap.h (15)
 54. deps/v8/src/hydrogen-instructions.cc (76)
 55. deps/v8/src/hydrogen-instructions.h (274)
 56. deps/v8/src/hydrogen.cc (421)
 57. deps/v8/src/hydrogen.h (21)
 58. deps/v8/src/ia32/assembler-ia32.cc (18)
 59. deps/v8/src/ia32/builtins-ia32.cc (42)
 60. deps/v8/src/ia32/code-stubs-ia32.cc (39)
 61. deps/v8/src/ia32/codegen-ia32.cc (49)
 62. deps/v8/src/ia32/deoptimizer-ia32.cc (41)
 63. deps/v8/src/ia32/full-codegen-ia32.cc (177)
 64. deps/v8/src/ia32/ic-ia32.cc (6)
 65. deps/v8/src/ia32/lithium-codegen-ia32.cc (161)
 66. deps/v8/src/ia32/lithium-ia32.cc (48)
 67. deps/v8/src/ia32/lithium-ia32.h (104)
 68. deps/v8/src/ia32/macro-assembler-ia32.cc (57)
 69. deps/v8/src/ia32/macro-assembler-ia32.h (9)
 70. deps/v8/src/ia32/stub-cache-ia32.cc (199)
 71. deps/v8/src/ic-inl.h (7)
 72. deps/v8/src/ic.cc (197)
 73. deps/v8/src/ic.h (82)
 74. deps/v8/src/isolate.cc (2)
 75. deps/v8/src/isolate.h (2)
 76. deps/v8/src/jsregexp.cc (30)
 77. deps/v8/src/lithium-allocator.cc (119)
 78. deps/v8/src/lithium-allocator.h (46)
 79. deps/v8/src/lithium.h (5)
 80. deps/v8/src/macro-assembler.h (7)
 81. deps/v8/src/mark-compact.cc (6)
 82. deps/v8/src/math.js (8)
 83. deps/v8/src/messages.js (27)
 84. deps/v8/src/mips/builtins-mips.cc (39)
 85. deps/v8/src/mips/code-stubs-mips.cc (60)
 86. deps/v8/src/mips/codegen-mips.cc (56)
 87. deps/v8/src/mips/deoptimizer-mips.cc (50)
 88. deps/v8/src/mips/full-codegen-mips.cc (50)
 89. deps/v8/src/mips/lithium-codegen-mips.cc (80)
 90. deps/v8/src/mips/lithium-mips.cc (18)
 91. deps/v8/src/mips/lithium-mips.h (30)
 92. deps/v8/src/mips/macro-assembler-mips.cc (20)
 93. deps/v8/src/mips/macro-assembler-mips.h (4)
 94. deps/v8/src/mips/simulator-mips.cc (4)
 95. deps/v8/src/mips/stub-cache-mips.cc (188)
 96. deps/v8/src/mirror-debugger.js (74)
 97. deps/v8/src/objects-debug.cc (13)
 98. deps/v8/src/objects-inl.h (40)
 99. deps/v8/src/objects-printer.cc (9)
100. deps/v8/src/objects-visiting-inl.h (4)

deps/v8/.gitignore (8)

@@ -23,10 +23,10 @@ shell_g
 /build/gyp
 /obj/
 /out/
-/test/es5conform/data/
-/test/mozilla/data/
-/test/sputnik/sputniktests/
-/test/test262/data/
+/test/es5conform/data
+/test/mozilla/data
+/test/sputnik/sputniktests
+/test/test262/data
 /tools/oom_dump/oom_dump
 /tools/oom_dump/oom_dump.o
 /tools/visual_studio/Debug

deps/v8/AUTHORS (4)

@@ -23,14 +23,18 @@ Daniel James <dnljms@gmail.com>
 Dineel D Sule <dsule@codeaurora.org>
 Erich Ocean <erich.ocean@me.com>
 Fedor Indutny <fedor@indutny.com>
+Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
 Jan de Mooij <jandemooij@gmail.com>
 Jay Freeman <saurik@saurik.com>
 Joel Stanley <joel.stan@gmail.com>
 John Jozwiak <jjozwiak@codeaurora.org>
+Jonathan Liu <net147@gmail.com>
 Kun Zhang <zhangk@codeaurora.org>
 Martyn Capewell <martyn.capewell@arm.com>
+Mathias Bynens <mathias@qiwi.be>
 Matt Hanselman <mjhanselman@gmail.com>
 Maxim Mossienko <maxim.mossienko@gmail.com>
+Michael Lutz <michi@icosahedron.de>
 Michael Smith <mike@w3.org>
 Mike Gilbert <floppymaster@gmail.com>
 Paolo Giarrusso <p.giarrusso@gmail.com>

deps/v8/ChangeLog (55)

@@ -1,3 +1,58 @@
+2012-02-23: Version 3.9.9
+
+        Supported fast case for-in in Crankshaft.
+
+        Sped up heap snapshot serialization and dominators construction.
+
+        Randomized allocation addresses on windows. (Chromium issue 115151)
+
+        Fixed compilation with MinGW-w64. (issue 1943)
+
+        Fixed incorrect value of assignments to non-extensible properties.
+
+        Fixed a crash bug in generated code on ia32.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-21: Version 3.9.8
+
+        Fixed memory leak and missing #include in StartupDataDecompressor
+        (issue 1960).
+
+        Renamed static methods to avoid shadowing virtual methods and fix Clang
+        C++11 compile error.
+
+        Fixed sequence of element access in array builtins (issue 1790).
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-16: Version 3.9.7
+
+        Fixed V8 issues 1322, 1878, 1942, 1945 and Chromium issue 113924.
+
+        Fixed GCC-4.7 warnings.
+
+        Added Navier-Stokes benchmark.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-02-14: Version 3.9.6
+
+        Fix template-related linker error. (issue 1936)
+
+        Allow inlining of functions containing object literals. (issue 1322)
+
+        Add --call-graph-size option to tickprocessor. (issue 1937)
+
+        Heap Snapshot maximum size limit is too low for really big apps. At the
+        moment the limit is 256MB. (Chromium issue 113015)
+
+        Performance and stability improvements on all platforms.
+
+
 2012-02-09: Version 3.9.5
 
         Removed unused command line flags.
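
(For context on the "incorrect value of assignments to non-extensible properties" entry above: under ES5, an assignment expression evaluates to its right-hand side even when the store is silently rejected in sloppy mode. A minimal JavaScript sketch, illustrative only and not part of the commit:)

    var o = {};
    Object.preventExtensions(o);
    // The property is never created on o, but the assignment expression
    // itself must still evaluate to the assigned value, 42.
    var result = (o.x = 42);
    // result === 42, and ('x' in o) === false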

deps/v8/SConstruct (5)

@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -296,10 +296,11 @@ V8_EXTRA_FLAGS = {
                     '-Werror',
                     '-W',
                     '-Wno-unused-parameter',
+                    '-Woverloaded-virtual',
                     '-Wnon-virtual-dtor']
   },
   'os:win32': {
-    'WARNINGFLAGS': ['-pedantic', '-Wno-long-long']
+    'WARNINGFLAGS': ['-pedantic', '-Wno-long-long', '-Wno-pedantic-ms-format']
   },
   'os:linux': {
     'WARNINGFLAGS': ['-pedantic'],

deps/v8/benchmarks/base.js (4)

@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -78,7 +78,7 @@ BenchmarkSuite.suites = [];
 // Scores are not comparable across versions. Bump the version if
 // you're making changes that will affect that scores, e.g. if you add
 // a new benchmark or change an existing one.
-BenchmarkSuite.version = '6';
+BenchmarkSuite.version = '7';
 
 
 // To make the benchmark results predictable, we replace Math.random

deps/v8/benchmarks/navier-stokes.js (387, new file)

@@ -0,0 +1,387 @@
/**
 * Copyright 2012 the V8 project authors. All rights reserved.
 * Copyright 2009 Oliver Hunt <http://nerget.com>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

var NavierStokes = new BenchmarkSuite('NavierStokes', 1484000,
                                      [new Benchmark('NavierStokes',
                                                     runNavierStokes,
                                                     setupNavierStokes,
                                                     tearDownNavierStokes)]);

var solver = null;

function runNavierStokes()
{
    solver.update();
}

function setupNavierStokes()
{
    solver = new FluidField(null);
    solver.setResolution(128, 128);
    solver.setIterations(20);
    solver.setDisplayFunction(function(){});
    solver.setUICallback(prepareFrame);
    solver.reset();
}

function tearDownNavierStokes()
{
    solver = null;
}

function addPoints(field) {
    var n = 64;
    for (var i = 1; i <= n; i++) {
        field.setVelocity(i, i, n, n);
        field.setDensity(i, i, 5);
        field.setVelocity(i, n - i, -n, -n);
        field.setDensity(i, n - i, 20);
        field.setVelocity(128 - i, n + i, -n, -n);
        field.setDensity(128 - i, n + i, 30);
    }
}

var framesTillAddingPoints = 0;
var framesBetweenAddingPoints = 5;

function prepareFrame(field)
{
    if (framesTillAddingPoints == 0) {
        addPoints(field);
        framesTillAddingPoints = framesBetweenAddingPoints;
        framesBetweenAddingPoints++;
    } else {
        framesTillAddingPoints--;
    }
}

// Code from Oliver Hunt (http://nerget.com/fluidSim/pressure.js) starts here.
function FluidField(canvas) {
    function addFields(x, s, dt)
    {
        for (var i=0; i<size ; i++ ) x[i] += dt*s[i];
    }

    function set_bnd(b, x)
    {
        if (b===1) {
            for (var i = 1; i <= width; i++) {
                x[i] = x[i + rowSize];
                x[i + (height+1) *rowSize] = x[i + height * rowSize];
            }

            for (var j = 1; i <= height; i++) {
                x[j * rowSize] = -x[1 + j * rowSize];
                x[(width + 1) + j * rowSize] = -x[width + j * rowSize];
            }
        } else if (b === 2) {
            for (var i = 1; i <= width; i++) {
                x[i] = -x[i + rowSize];
                x[i + (height + 1) * rowSize] = -x[i + height * rowSize];
            }

            for (var j = 1; j <= height; j++) {
                x[j * rowSize] = x[1 + j * rowSize];
                x[(width + 1) + j * rowSize] = x[width + j * rowSize];
            }
        } else {
            for (var i = 1; i <= width; i++) {
                x[i] = x[i + rowSize];
                x[i + (height + 1) * rowSize] = x[i + height * rowSize];
            }

            for (var j = 1; j <= height; j++) {
                x[j * rowSize] = x[1 + j * rowSize];
                x[(width + 1) + j * rowSize] = x[width + j * rowSize];
            }
        }
        var maxEdge = (height + 1) * rowSize;
        x[0] = 0.5 * (x[1] + x[rowSize]);
        x[maxEdge] = 0.5 * (x[1 + maxEdge] + x[height * rowSize]);
        x[(width+1)] = 0.5 * (x[width] + x[(width + 1) + rowSize]);
        x[(width+1)+maxEdge] = 0.5 * (x[width + maxEdge] + x[(width + 1) + height * rowSize]);
    }

    function lin_solve(b, x, x0, a, c)
    {
        if (a === 0 && c === 1) {
            for (var j=1 ; j<=height; j++) {
                var currentRow = j * rowSize;
                ++currentRow;
                for (var i = 0; i < width; i++) {
                    x[currentRow] = x0[currentRow];
                    ++currentRow;
                }
            }
            set_bnd(b, x);
        } else {
            var invC = 1 / c;
            for (var k=0 ; k<iterations; k++) {
                for (var j=1 ; j<=height; j++) {
                    var lastRow = (j - 1) * rowSize;
                    var currentRow = j * rowSize;
                    var nextRow = (j + 1) * rowSize;
                    var lastX = x[currentRow];
                    ++currentRow;
                    for (var i=1; i<=width; i++)
                        lastX = x[currentRow] = (x0[currentRow] + a*(lastX+x[++currentRow]+x[++lastRow]+x[++nextRow])) * invC;
                }
                set_bnd(b, x);
            }
        }
    }

    function diffuse(b, x, x0, dt)
    {
        var a = 0;
        lin_solve(b, x, x0, a, 1 + 4*a);
    }

    function lin_solve2(x, x0, y, y0, a, c)
    {
        if (a === 0 && c === 1) {
            for (var j=1 ; j <= height; j++) {
                var currentRow = j * rowSize;
                ++currentRow;
                for (var i = 0; i < width; i++) {
                    x[currentRow] = x0[currentRow];
                    y[currentRow] = y0[currentRow];
                    ++currentRow;
                }
            }
            set_bnd(1, x);
            set_bnd(2, y);
        } else {
            var invC = 1/c;
            for (var k=0 ; k<iterations; k++) {
                for (var j=1 ; j <= height; j++) {
                    var lastRow = (j - 1) * rowSize;
                    var currentRow = j * rowSize;
                    var nextRow = (j + 1) * rowSize;
                    var lastX = x[currentRow];
                    var lastY = y[currentRow];
                    ++currentRow;
                    for (var i = 1; i <= width; i++) {
                        lastX = x[currentRow] = (x0[currentRow] + a * (lastX + x[currentRow] + x[lastRow] + x[nextRow])) * invC;
                        lastY = y[currentRow] = (y0[currentRow] + a * (lastY + y[++currentRow] + y[++lastRow] + y[++nextRow])) * invC;
                    }
                }
                set_bnd(1, x);
                set_bnd(2, y);
            }
        }
    }

    function diffuse2(x, x0, y, y0, dt)
    {
        var a = 0;
        lin_solve2(x, x0, y, y0, a, 1 + 4 * a);
    }

    function advect(b, d, d0, u, v, dt)
    {
        var Wdt0 = dt * width;
        var Hdt0 = dt * height;
        var Wp5 = width + 0.5;
        var Hp5 = height + 0.5;
        for (var j = 1; j<= height; j++) {
            var pos = j * rowSize;
            for (var i = 1; i <= width; i++) {
                var x = i - Wdt0 * u[++pos];
                var y = j - Hdt0 * v[pos];
                if (x < 0.5)
                    x = 0.5;
                else if (x > Wp5)
                    x = Wp5;
                var i0 = x | 0;
                var i1 = i0 + 1;
                if (y < 0.5)
                    y = 0.5;
                else if (y > Hp5)
                    y = Hp5;
                var j0 = y | 0;
                var j1 = j0 + 1;
                var s1 = x - i0;
                var s0 = 1 - s1;
                var t1 = y - j0;
                var t0 = 1 - t1;
                var row1 = j0 * rowSize;
                var row2 = j1 * rowSize;
                d[pos] = s0 * (t0 * d0[i0 + row1] + t1 * d0[i0 + row2]) + s1 * (t0 * d0[i1 + row1] + t1 * d0[i1 + row2]);
            }
        }
        set_bnd(b, d);
    }

    function project(u, v, p, div)
    {
        var h = -0.5 / Math.sqrt(width * height);
        for (var j = 1 ; j <= height; j++ ) {
            var row = j * rowSize;
            var previousRow = (j - 1) * rowSize;
            var prevValue = row - 1;
            var currentRow = row;
            var nextValue = row + 1;
            var nextRow = (j + 1) * rowSize;
            for (var i = 1; i <= width; i++ ) {
                div[++currentRow] = h * (u[++nextValue] - u[++prevValue] + v[++nextRow] - v[++previousRow]);
                p[currentRow] = 0;
            }
        }
        set_bnd(0, div);
        set_bnd(0, p);

        lin_solve(0, p, div, 1, 4 );
        var wScale = 0.5 * width;
        var hScale = 0.5 * height;
        for (var j = 1; j<= height; j++ ) {
            var prevPos = j * rowSize - 1;
            var currentPos = j * rowSize;
            var nextPos = j * rowSize + 1;
            var prevRow = (j - 1) * rowSize;
            var currentRow = j * rowSize;
            var nextRow = (j + 1) * rowSize;

            for (var i = 1; i<= width; i++) {
                u[++currentPos] -= wScale * (p[++nextPos] - p[++prevPos]);
                v[currentPos] -= hScale * (p[++nextRow] - p[++prevRow]);
            }
        }
        set_bnd(1, u);
        set_bnd(2, v);
    }

    function dens_step(x, x0, u, v, dt)
    {
        addFields(x, x0, dt);
        diffuse(0, x0, x, dt );
        advect(0, x, x0, u, v, dt );
    }

    function vel_step(u, v, u0, v0, dt)
    {
        addFields(u, u0, dt );
        addFields(v, v0, dt );
        var temp = u0; u0 = u; u = temp;
        var temp = v0; v0 = v; v = temp;
        diffuse2(u,u0,v,v0, dt);
        project(u, v, u0, v0);
        var temp = u0; u0 = u; u = temp;
        var temp = v0; v0 = v; v = temp;
        advect(1, u, u0, u0, v0, dt);
        advect(2, v, v0, u0, v0, dt);
        project(u, v, u0, v0 );
    }
    var uiCallback = function(d,u,v) {};

    function Field(dens, u, v) {
        // Just exposing the fields here rather than using accessors is a measurable win during display (maybe 5%)
        // but makes the code ugly.
        this.setDensity = function(x, y, d) {
            dens[(x + 1) + (y + 1) * rowSize] = d;
        }
        this.getDensity = function(x, y) {
            return dens[(x + 1) + (y + 1) * rowSize];
        }
        this.setVelocity = function(x, y, xv, yv) {
            u[(x + 1) + (y + 1) * rowSize] = xv;
            v[(x + 1) + (y + 1) * rowSize] = yv;
        }
        this.getXVelocity = function(x, y) {
            return u[(x + 1) + (y + 1) * rowSize];
        }
        this.getYVelocity = function(x, y) {
            return v[(x + 1) + (y + 1) * rowSize];
        }
        this.width = function() { return width; }
        this.height = function() { return height; }
    }
    function queryUI(d, u, v)
    {
        for (var i = 0; i < size; i++)
            u[i] = v[i] = d[i] = 0.0;
        uiCallback(new Field(d, u, v));
    }

    this.update = function () {
        queryUI(dens_prev, u_prev, v_prev);
        vel_step(u, v, u_prev, v_prev, dt);
        dens_step(dens, dens_prev, u, v, dt);
        displayFunc(new Field(dens, u, v));
    }
    this.setDisplayFunction = function(func) {
        displayFunc = func;
    }

    this.iterations = function() { return iterations; }
    this.setIterations = function(iters) {
        if (iters > 0 && iters <= 100)
            iterations = iters;
    }
    this.setUICallback = function(callback) {
        uiCallback = callback;
    }
    var iterations = 10;
    var visc = 0.5;
    var dt = 0.1;
    var dens;
    var dens_prev;
    var u;
    var u_prev;
    var v;
    var v_prev;
    var width;
    var height;
    var rowSize;
    var size;
    var displayFunc;
    function reset()
    {
        rowSize = width + 2;
        size = (width+2)*(height+2);
        dens = new Array(size);
        dens_prev = new Array(size);
        u = new Array(size);
        u_prev = new Array(size);
        v = new Array(size);
        v_prev = new Array(size);
        for (var i = 0; i < size; i++)
            dens_prev[i] = u_prev[i] = v_prev[i] = dens[i] = u[i] = v[i] = 0;
    }
    this.reset = reset;
    this.setResolution = function (hRes, wRes)
    {
        var res = wRes * hRes;
        if (res > 0 && res < 1000000 && (wRes != width || hRes != height)) {
            width = wRes;
            height = hRes;
            reset();
            return true;
        }
        return false;
    }
    this.setResolution(64, 64);
}
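
(For orientation, a minimal sketch of driving the FluidField API above outside the BenchmarkSuite harness; the standalone loop here is illustrative, not part of the benchmark:)

    var field = new FluidField(null);         // the canvas argument is unused here
    field.setResolution(128, 128);            // solver grid, as in setupNavierStokes()
    field.setIterations(20);                  // lin_solve relaxation iterations
    field.setDisplayFunction(function() {});  // the benchmark renders nothing
    field.setUICallback(prepareFrame);        // injects densities/velocities each frame
    field.reset();
    for (var i = 0; i < 10; i++) {
      field.update();                         // one vel_step + dens_step per call
    }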

deps/v8/benchmarks/run.html (2)

@@ -14,6 +14,7 @@
 <script type="text/javascript" src="earley-boyer.js"></script>
 <script type="text/javascript" src="regexp.js"></script>
 <script type="text/javascript" src="splay.js"></script>
+<script type="text/javascript" src="navier-stokes.js"></script>
 <link type="text/css" rel="stylesheet" href="style.css" />
 <script type="text/javascript">
 var completed = 0;
@@ -117,6 +118,7 @@ higher scores means better performance: <em>Bigger is better!</em>
 (<i>1761 lines</i>).
 </li>
 <li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>394 lines</i>).</li>
+<li><b>NavierStokes (beta)</b><br>Solves NavierStokes equations in 2D, heavily manipulating double precision arrays. Based on Oliver Hunt's code (<i>396 lines</i>).</li>
 </ul>
 
 <p>

deps/v8/benchmarks/run.js (1)

@@ -34,6 +34,7 @@ load('raytrace.js');
 load('earley-boyer.js');
 load('regexp.js');
 load('splay.js');
+load('navier-stokes.js');
 
 
 var success = true;

deps/v8/build/common.gypi (6)

@@ -83,7 +83,6 @@
     'v8_use_snapshot%': 'true',
     'host_os%': '<(OS)',
     'v8_use_liveobjectlist%': 'false',
-    'werror%': '-Werror',
 
     # With post mortem support enabled, metadata is embedded into libv8 that
     # describes various parameters of the VM for use by debuggers. See
@@ -305,8 +304,8 @@
             'cflags': [ '-I/usr/pkg/include' ],
           }],
           ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
-            'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
-                        '-Wnon-virtual-dtor' ],
+            'cflags': [ '<(werror)', '-W', '-Wno-unused-parameter',
+                        '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
          }],
        ],
      },  # Debug
@@ -352,6 +351,7 @@
        }],  # OS=="mac"
        ['OS=="win"', {
          'msvs_configuration_attributes': {
+            'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
            'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
            'CharacterSet': '1',
          },

deps/v8/build/standalone.gypi (3)

@@ -61,7 +61,6 @@
     'host_arch%': '<(host_arch)',
     'target_arch%': '<(target_arch)',
     'v8_target_arch%': '<(v8_target_arch)',
-    'werror%': '-Werror',
     'conditions': [
       ['(v8_target_arch=="arm" and host_arch!="arm") or \
         (v8_target_arch=="mips" and host_arch!="mips") or \
@@ -84,7 +83,7 @@
     ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
       or OS=="netbsd"', {
       'target_defaults': {
-        'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
+        'cflags': [ '-W', '-Wno-unused-parameter',
                     '-Wnon-virtual-dtor', '-pthread', '-fno-rtti',
                     '-fno-exceptions', '-pedantic' ],
        'ldflags': [ '-pthread', ],

deps/v8/include/v8.h (2)

@@ -3850,7 +3850,7 @@ class Internals {
   static const int kFullStringRepresentationMask = 0x07;
   static const int kExternalTwoByteRepresentationTag = 0x02;
 
-  static const int kJSObjectType = 0xa7;
+  static const int kJSObjectType = 0xa8;
   static const int kFirstNonstringType = 0x80;
   static const int kForeignType = 0x85;

deps/v8/preparser/preparser-process.cc (2)

@@ -200,6 +200,7 @@ void fail(v8::PreParserData* data, const char* message, ...) {
   vfprintf(stderr, message, args);
   va_end(args);
   fflush(stderr);
+  if (data != NULL) {
     // Print preparser data to stdout.
     uint32_t size = data->size();
     fprintf(stderr, "LOG: data size: %u\n", size);
@@ -207,6 +208,7 @@ void fail(v8::PreParserData* data, const char* message, ...) {
       perror("ERROR: Writing data");
       fflush(stderr);
     }
+  }
   exit(EXIT_FAILURE);
 }

deps/v8/src/api.cc (4)

@@ -42,6 +42,9 @@
 #include "global-handles.h"
 #include "heap-profiler.h"
 #include "messages.h"
+#ifdef COMPRESS_STARTUP_DATA_BZ2
+#include "natives.h"
+#endif
 #include "parser.h"
 #include "platform.h"
 #include "profile-generator-inl.h"
@@ -357,6 +360,7 @@ int StartupDataDecompressor::Decompress() {
     compressed_data[i].data = decompressed;
   }
   V8::SetDecompressedStartupData(compressed_data);
+  i::DeleteArray(compressed_data);
   return 0;
 }

deps/v8/src/apinatives.js (4)

@@ -37,8 +37,8 @@ function CreateDate(time) {
 }
 
 
-const kApiFunctionCache = {};
-const functionCache = kApiFunctionCache;
+var kApiFunctionCache = {};
+var functionCache = kApiFunctionCache;
 
 
 function Instantiate(data, name) {

deps/v8/src/arm/builtins-arm.cc (35)

@@ -313,7 +313,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
                             Label* call_generic_code) {
   Counters* counters = masm->isolate()->counters();
   Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
-      has_non_smi_element;
+      has_non_smi_element, finish, cant_transition_map, not_double;
 
   // Check for array construction with zero arguments or one.
   __ cmp(r0, Operand(0, RelocInfo::NONE));
@@ -418,6 +418,8 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ bind(&entry);
   __ cmp(r4, r5);
   __ b(lt, &loop);
+
+  __ bind(&finish);
   __ mov(sp, r7);
 
   // Remove caller arguments and receiver from the stack, setup return value and
@@ -430,8 +432,39 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ Jump(lr);
 
   __ bind(&has_non_smi_element);
+  // Double values are handled by the runtime.
+  __ CheckMap(
+      r2, r9, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
+  __ bind(&cant_transition_map);
   __ UndoAllocationInNewSpace(r3, r4);
   __ b(call_generic_code);
+
+  __ bind(&not_double);
+  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+  // r3: JSArray
+  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         r2,
+                                         r9,
+                                         &cant_transition_map);
+  __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ RecordWriteField(r3,
+                      HeapObject::kMapOffset,
+                      r2,
+                      r9,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  Label loop2;
+  __ sub(r7, r7, Operand(kPointerSize));
+  __ bind(&loop2);
+  __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
+  __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
+  __ cmp(r4, r5);
+  __ b(lt, &loop2);
+  __ b(&finish);
 }
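
(A JavaScript-level illustration, not part of the diff, of the cases the ArrayNativeCode change above distinguishes; the elements-kind names follow the comments in the assembly:)

    var a = new Array(1, 2, 3);    // all smis: the array stays FAST_SMI_ONLY_ELEMENTS
    var b = new Array(1, 2, "x");  // non-smi, non-double element: the new code
                                   // transitions the map to FAST_ELEMENTS and
                                   // finishes on the fast path
    var c = new Array(1, 2, 3.5);  // heap-number element: "Double values are
                                   // handled by the runtime", so this falls back
                                   // to the generic code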

deps/v8/src/arm/code-stubs-arm.cc (44)

@@ -3439,6 +3439,11 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
 }
 
 
+void InterruptStub::Generate(MacroAssembler* masm) {
+  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
 void MathPowStub::Generate(MacroAssembler* masm) {
   CpuFeatures::Scope vfp3_scope(VFP3);
   const Register base = r1;
@@ -3674,17 +3679,6 @@ void CEntryStub::GenerateAheadOfTime() {
 }
 
 
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
-  __ Throw(r0);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
-                                          UncatchableExceptionType type) {
-  __ ThrowUncatchable(type, r0);
-}
-
-
 void CEntryStub::GenerateCore(MacroAssembler* masm,
                               Label* throw_normal_exception,
                               Label* throw_termination_exception,
@@ -3865,13 +3859,27 @@ void CEntryStub::Generate(MacroAssembler* masm) {
                true);
 
   __ bind(&throw_out_of_memory_exception);
-  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+  // Set external caught exception to false.
+  Isolate* isolate = masm->isolate();
+  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+                                    isolate);
+  __ mov(r0, Operand(false, RelocInfo::NONE));
+  __ mov(r2, Operand(external_caught));
+  __ str(r0, MemOperand(r2));
+
+  // Set pending exception and r0 to out of memory exception.
+  Failure* out_of_memory = Failure::OutOfMemoryException();
+  __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                       isolate)));
+  __ str(r0, MemOperand(r2));
+  // Fall through to the next label.
 
   __ bind(&throw_termination_exception);
-  GenerateThrowUncatchable(masm, TERMINATION);
+  __ ThrowUncatchable(r0);
 
   __ bind(&throw_normal_exception);
-  GenerateThrowTOS(masm);
+  __ Throw(r0);
 }
@@ -4912,10 +4920,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   Label termination_exception;
   __ b(eq, &termination_exception);
 
-  __ Throw(r0);  // Expects thrown value in r0.
+  __ Throw(r0);
 
   __ bind(&termination_exception);
-  __ ThrowUncatchable(TERMINATION, r0);  // Expects thrown value in r0.
+  __ ThrowUncatchable(r0);
 
   __ bind(&failure);
   // For failure and exception return null.
@@ -7059,11 +7067,13 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   { r2, r1, r3, EMIT_REMEMBERED_SET },
   { r3, r1, r2, EMIT_REMEMBERED_SET },
   // KeyedStoreStubCompiler::GenerateStoreFastElement.
-  { r4, r2, r3, EMIT_REMEMBERED_SET },
+  { r3, r2, r4, EMIT_REMEMBERED_SET },
+  { r2, r3, r4, EMIT_REMEMBERED_SET },
   // ElementsTransitionGenerator::GenerateSmiOnlyToObject
   // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { r2, r3, r9, EMIT_REMEMBERED_SET },
+  { r2, r3, r9, OMIT_REMEMBERED_SET },
   // ElementsTransitionGenerator::GenerateDoubleToObject
   { r6, r2, r0, EMIT_REMEMBERED_SET },
   { r2, r6, r9, EMIT_REMEMBERED_SET },

deps/v8/src/arm/codegen-arm.cc (55)

@@ -90,11 +90,16 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
   //  -- r3    : target map, scratch for subsequent call
   //  -- r4    : scratch (elements)
   // -----------------------------------
-  Label loop, entry, convert_hole, gc_required;
+  Label loop, entry, convert_hole, gc_required, only_change_map, done;
   bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
-  __ push(lr);
 
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
   __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+  __ b(eq, &only_change_map);
+
+  __ push(lr);
   __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
   // r4: source FixedArray
   // r5: number of elements (smi-tagged)
@@ -117,7 +122,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
                       r9,
                       kLRHasBeenSaved,
                       kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
+                      OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   // Replace receiver's backing store with newly created FixedDoubleArray.
   __ add(r3, r6, Operand(kHeapObjectTag));
@@ -146,6 +151,18 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
   __ b(&entry);
 
+  __ bind(&only_change_map);
+  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ RecordWriteField(r2,
+                      HeapObject::kMapOffset,
+                      r3,
+                      r9,
+                      kLRHasBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ b(&done);
+
   // Call into runtime if GC is required.
   __ bind(&gc_required);
   __ pop(lr);
@@ -194,6 +211,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
   if (!vfp3_supported) __ Pop(r1, r0);
   __ pop(lr);
+  __ bind(&done);
 }
@@ -207,10 +225,15 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   //  -- r3    : target map, scratch for subsequent call
   //  -- r4    : scratch (elements)
   // -----------------------------------
-  Label entry, loop, convert_hole, gc_required;
-  __ push(lr);
+  Label entry, loop, convert_hole, gc_required, only_change_map;
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
   __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+  __ b(eq, &only_change_map);
+
+  __ push(lr);
   __ Push(r3, r2, r1, r0);
   __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
   // r4: source FixedDoubleArray
@@ -280,16 +303,6 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   __ b(lt, &loop);
 
   __ Pop(r3, r2, r1, r0);
-  // Update receiver's map.
-  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ RecordWriteField(r2,
-                      HeapObject::kMapOffset,
-                      r3,
-                      r9,
-                      kLRHasBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
   // Replace receiver's backing store with newly created and filled FixedArray.
   __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
   __ RecordWriteField(r2,
@@ -301,6 +314,18 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
                       EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   __ pop(lr);
+
+  __ bind(&only_change_map);
+  // Update receiver's map.
+  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ RecordWriteField(r2,
+                      HeapObject::kMapOffset,
+                      r3,
+                      r9,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
 }
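
(Illustrative JavaScript for the new empty-array shortcut above; the exact trigger shown is an assumption, since elements-kind transitions are a VM-internal detail:)

    var a = [];   // backing store is the shared empty FixedArray
    a[0] = 0.5;   // forces a smi-only -> double elements transition; with the
                  // change above, an empty backing store means only the map is
                  // rewritten and the element-conversion loop is skipped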

deps/v8/src/arm/full-codegen-arm.cc (69)

@@ -123,10 +123,8 @@ class JumpPatchSite BASE_EMBEDDED {
 //
 // The function builds a JS frame. Please see JavaScriptFrameConstants in
 // frames-arm.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
-  ASSERT(info_ == NULL);
-  info_ = info;
-  scope_ = info->scope();
+void FullCodeGenerator::Generate() {
+  CompilationInfo* info = info_;
   handler_table_ =
       isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
   SetFunctionPosition(function());
@@ -142,7 +140,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
   // We can optionally optimize based on counters rather than statistical
   // sampling.
   if (info->ShouldSelfOptimize()) {
-    if (FLAG_trace_opt) {
+    if (FLAG_trace_opt_verbose) {
       PrintF("[adding self-optimization header to %s]\n",
              *info->function()->debug_name()->ToCString());
     }
@@ -331,7 +329,8 @@ void FullCodeGenerator::ClearAccumulator() {
 }
 
 
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+                                       Label* back_edge_target) {
   Comment cmnt(masm_, "[ Stack check");
   Label ok;
   __ LoadRoot(ip, Heap::kStackLimitRootIndex);
@@ -935,6 +934,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   __ cmp(r0, null_value);
   __ b(eq, &exit);
 
+  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
   // Convert the object to a JS object.
   Label convert, done_convert;
   __ JumpIfSmi(r0, &convert);
@@ -956,48 +957,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  Label next;
-  // Preload a couple of values used in the loop.
-  Register empty_fixed_array_value = r6;
-  __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
-  Register empty_descriptor_array_value = r7;
-  __ LoadRoot(empty_descriptor_array_value,
-              Heap::kEmptyDescriptorArrayRootIndex);
-  __ mov(r1, r0);
-  __ bind(&next);
-
-  // Check that there are no elements. Register r1 contains the
-  // current JS object we've reached through the prototype chain.
-  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ cmp(r2, empty_fixed_array_value);
-  __ b(ne, &call_runtime);
-
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache. Leave the map in r2 for the subsequent
-  // prototype load.
-  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
-  __ JumpIfSmi(r3, &call_runtime);
-
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors (r3). This is the case if the next enumeration
-  // index field does not contain a smi.
-  __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
-  __ JumpIfSmi(r3, &call_runtime);
-
-  // For all objects but the receiver, check that the cache is empty.
-  Label check_prototype;
-  __ cmp(r1, r0);
-  __ b(eq, &check_prototype);
-  __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  __ cmp(r3, empty_fixed_array_value);
-  __ b(ne, &call_runtime);
-
-  // Load the prototype from the map and loop if non-null.
-  __ bind(&check_prototype);
-  __ ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
-  __ cmp(r1, null_value);
-  __ b(ne, &next);
+  __ CheckEnumCache(null_value, &call_runtime);
 
   // The enum cache is valid. Load the map of the object being
   // iterated over and use the cache for the iteration.
@@ -1050,6 +1010,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   __ Push(r1, r0);  // Fixed array length (as smi) and initial index.
 
   // Generate code for doing the condition check.
+  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
   // Load the current count to r0, load the length to r1.
   __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
@@ -1093,7 +1054,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   __ mov(result_register(), r3);
   // Perform the assignment as if via '='.
   { EffectContext context(this);
-    EmitAssignment(stmt->each(), stmt->AssignmentId());
+    EmitAssignment(stmt->each());
   }
 
   // Generate code for the body of the loop.
@@ -1106,7 +1067,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   __ add(r0, r0, Operand(Smi::FromInt(1)));
   __ push(r0);
 
-  EmitStackCheck(stmt);
+  EmitStackCheck(stmt, &loop);
   __ b(&loop);
 
   // Remove the pointers stored on the stack.
@@ -1114,6 +1075,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   __ Drop(5);
 
   // Exit and decrement the loop depth.
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1524,7 +1486,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
                            Smi::FromInt(0)));
         __ push(r1);
         VisitForStackValue(value);
-        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        __ mov(r0, Operand(Smi::FromInt(NONE)));
+        __ push(r0);
+        __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
         break;
     }
   }
@@ -1875,7 +1839,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
 }
 
 
-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
@@ -1927,7 +1891,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
       break;
     }
   }
-  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
  context()->Plug(r0);
}
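
(The for-in machinery touched above, together with the lithium instructions below, backs the "fast case for-in in Crankshaft" ChangeLog entry. A sketch of the kind of loop it targets; illustrative only, not part of the diff:)

    function sumValues(o) {
      var total = 0;
      // Simple objects whose maps carry a valid enum cache can now keep this
      // loop in optimized code instead of bailing out to the runtime.
      for (var key in o) {
        total += o[key];
      }
      return total;
    }
    sumValues({a: 1, b: 2, c: 3});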

deps/v8/src/arm/lithium-arm.cc (46)

@@ -1125,6 +1125,11 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
 }
 
 
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  return MarkAsCall(new LDeclareGlobals, instr);
+}
+
+
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   return DefineAsRegister(new LGlobalObject(context));
@@ -2088,19 +2093,18 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
 }
 
 
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
-  return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr);
+LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LFastLiteral, r0), instr);
 }
 
 
-LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
-  return MarkAsCall(DefineFixed(new LObjectLiteralFast, r0), instr);
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr);
 }
 
 
-LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
-    HObjectLiteralGeneric* instr) {
-  return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, r0), instr);
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteral, r0), instr);
 }
@@ -2264,4 +2268,32 @@ LInstruction* LChunkBuilder::DoIn(HIn* instr) {
 }
 
 
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* object = UseFixed(instr->enumerable(), r0);
+  LForInPrepareMap* result = new LForInPrepareMap(object);
+  return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(DefineAsRegister(
+      new LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseRegister(instr->index());
+  return DefineAsRegister(new LLoadFieldByIndex(object, index));
+}
+
+
 } }  // namespace v8::internal

deps/v8/src/arm/lithium-arm.h (93)

@@ -87,11 +87,13 @@ class LCodeGen;
   V(ConstantI)                                  \
   V(ConstantT)                                  \
   V(Context)                                    \
+  V(DeclareGlobals)                             \
   V(DeleteProperty)                             \
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
   V(ElementsKind)                               \
+  V(FastLiteral)                                \
   V(FixedArrayBaseLength)                       \
   V(FunctionLiteral)                            \
   V(GetCachedArrayIndex)                        \
@@ -134,8 +136,7 @@ class LCodeGen;
   V(NumberTagD)                                 \
   V(NumberTagI)                                 \
   V(NumberUntagD)                               \
-  V(ObjectLiteralFast)                          \
-  V(ObjectLiteralGeneric)                       \
+  V(ObjectLiteral)                              \
   V(OsrEntry)                                   \
   V(OuterContext)                               \
   V(Parameter)                                  \
@@ -171,7 +172,12 @@ class LCodeGen;
   V(TypeofIsAndBranch)                          \
   V(UnaryMathOperation)                         \
   V(UnknownOSRValue)                            \
-  V(ValueOf)
+  V(ValueOf)                                    \
+  V(ForInPrepareMap)                            \
+  V(ForInCacheArray)                            \
+  V(CheckMapValue)                              \
+  V(LoadFieldByIndex)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
@@ -1346,6 +1352,13 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
 };
 
 
+class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
 class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
  public:
  explicit LGlobalObject(LOperand* context) {
@@ -1909,24 +1922,24 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
 };
 
 
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
-  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+  DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
 };
 
 
-class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
+class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
-  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
 };
 
 
-class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
+class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
-  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
 };
@@ -2056,6 +2069,62 @@ class LIn: public LTemplateInstruction<1, 2, 0> {
 };
 
 
+class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInPrepareMap(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) {
+    inputs_[0] = map;
+  }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() {
+    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+  }
+};
+
+
+class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
 class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:

161
deps/v8/src/arm/lithium-codegen-arm.cc

@ -2873,6 +2873,16 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
} }
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
__ push(cp); // The context is the first argument.
__ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}
void LCodeGen::DoGlobalObject(LGlobalObject* instr) { void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result()); Register result = ToRegister(instr->result());
__ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX)); __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
@ -4370,26 +4380,35 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
ASSERT(!source.is(r2)); ASSERT(!source.is(r2));
ASSERT(!result.is(r2)); ASSERT(!result.is(r2));
// Only elements backing stores for non-COW arrays need to be copied.
Handle<FixedArrayBase> elements(object->elements());
bool has_elements = elements->length() > 0 &&
elements->map() != isolate()->heap()->fixed_cow_array_map();
// Increase the offset so that subsequent objects end up right after // Increase the offset so that subsequent objects end up right after
// this one. // this object and its backing store.
int current_offset = *offset; int object_offset = *offset;
int size = object->map()->instance_size(); int object_size = object->map()->instance_size();
*offset += size; int elements_offset = *offset + object_size;
int elements_size = has_elements ? elements->Size() : 0;
*offset += object_size + elements_size;
// Copy object header. // Copy object header.
ASSERT(object->properties()->length() == 0); ASSERT(object->properties()->length() == 0);
ASSERT(object->elements()->length() == 0 ||
object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
int inobject_properties = object->map()->inobject_properties(); int inobject_properties = object->map()->inobject_properties();
int header_size = size - inobject_properties * kPointerSize; int header_size = object_size - inobject_properties * kPointerSize;
for (int i = 0; i < header_size; i += kPointerSize) { for (int i = 0; i < header_size; i += kPointerSize) {
if (has_elements && i == JSObject::kElementsOffset) {
__ add(r2, result, Operand(elements_offset));
} else {
__ ldr(r2, FieldMemOperand(source, i)); __ ldr(r2, FieldMemOperand(source, i));
__ str(r2, FieldMemOperand(result, current_offset + i)); }
__ str(r2, FieldMemOperand(result, object_offset + i));
} }
// Copy in-object properties. // Copy in-object properties.
for (int i = 0; i < inobject_properties; i++) { for (int i = 0; i < inobject_properties; i++) {
int total_offset = current_offset + object->GetInObjectPropertyOffset(i); int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i)); Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
if (value->IsJSObject()) { if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value); Handle<JSObject> value_object = Handle<JSObject>::cast(value);
@ -4405,10 +4424,41 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
__ str(r2, FieldMemOperand(result, total_offset)); __ str(r2, FieldMemOperand(result, total_offset));
} }
} }
// Copy elements backing store header.
ASSERT(!has_elements || elements->IsFixedArray());
if (has_elements) {
__ LoadHeapObject(source, elements);
for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
__ ldr(r2, FieldMemOperand(source, i));
__ str(r2, FieldMemOperand(result, elements_offset + i));
}
}
// Copy elements backing store content.
ASSERT(!has_elements || elements->IsFixedArray());
int elements_length = has_elements ? elements->length() : 0;
for (int i = 0; i < elements_length; i++) {
int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
Handle<Object> value = JSObject::GetElement(object, i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ add(r2, result, Operand(*offset));
__ str(r2, FieldMemOperand(result, total_offset));
__ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
__ str(r2, FieldMemOperand(result, total_offset));
} else {
__ mov(r2, Operand(value));
__ str(r2, FieldMemOperand(result, total_offset));
}
}
} }
void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) { void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
int size = instr->hydrogen()->total_size(); int size = instr->hydrogen()->total_size();
// Allocate all objects that are part of the literal in one big // Allocate all objects that are part of the literal in one big
@ -4430,12 +4480,13 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
} }
void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) { void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
Handle<FixedArray> literals(instr->environment()->closure()->literals());
Handle<FixedArray> constant_properties = Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties(); instr->hydrogen()->constant_properties();
__ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); // Set up the parameters to the stub/runtime call.
__ ldr(r4, FieldMemOperand(r4, JSFunction::kLiteralsOffset)); __ LoadHeapObject(r4, literals);
__ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
__ mov(r2, Operand(constant_properties)); __ mov(r2, Operand(constant_properties));
int flags = instr->hydrogen()->fast_elements() int flags = instr->hydrogen()->fast_elements()
@ -4444,7 +4495,7 @@ void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
__ mov(r1, Operand(Smi::FromInt(flags))); __ mov(r1, Operand(Smi::FromInt(flags)));
__ Push(r4, r3, r2, r1); __ Push(r4, r3, r2, r1);
// Pick the right runtime function to call. // Pick the right runtime function or stub to call.
int properties_count = constant_properties->length() / 2; int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) { if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr); CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
@ -4799,6 +4850,88 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
} }
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
DeoptimizeIf(eq, instr->environment());
Register null_value = r5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmp(r0, null_value);
DeoptimizeIf(eq, instr->environment());
__ tst(r0, Operand(kSmiTagMask));
DeoptimizeIf(eq, instr->environment());
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
DeoptimizeIf(le, instr->environment());
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
__ b(&use_cache);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(r0);
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r1, ip);
DeoptimizeIf(ne, instr->environment());
__ bind(&use_cache);
}
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
__ LoadInstanceDescriptors(map, result);
__ ldr(result,
FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand(0));
DeoptimizeIf(eq, instr->environment());
}
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
__ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
DeoptimizeIf(ne, instr->environment());
}
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
Label out_of_object, done;
__ cmp(index, Operand(0));
__ b(lt, &out_of_object);
STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
__ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
__ b(&done);
__ bind(&out_of_object);
__ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
// Index is equal to negated out of object property index plus 1.
__ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(result, FieldMemOperand(scratch,
FixedArray::kHeaderSize - kPointerSize));
__ bind(&done);
}
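DoLoadFieldByIndex decodes a signed, smi-tagged field index: non-negative values address in-object fields, while negative values address the out-of-object properties array, with slot k encoded as -(k + 1). A minimal JavaScript model of that arithmetic (the two backing arrays are hypothetical, not V8's real object layout):

  function loadFieldByIndex(obj, index) {
    if (index >= 0) {
      // In-object field, stored right after the object header.
      return obj.inObjectFields[index];
    }
    // Out-of-object property: slot k was encoded as index = -(k + 1).
    return obj.outOfObjectProperties[-index - 1];
  }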
#undef __
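The new for-in support above only covers ordinary objects: DoForInPrepareMap deoptimizes for undefined, null, smis, and proxies, all of which already have well-defined slow-path behavior at the language level:

  for (var key in null) { throw new Error('unreachable'); }       // zero iterations
  for (var key in undefined) { throw new Error('unreachable'); }  // zero iterations
  var count = 0;
  for (var key in { a: 1, b: 2 }) count++;  // fast-path candidate; count === 2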

66
deps/v8/src/arm/macro-assembler-arm.cc

@@ -1281,8 +1281,7 @@ void MacroAssembler::Throw(Register value) {
}


-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
-                                      Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
@@ -1292,24 +1291,9 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in r0.
-  if (type == OUT_OF_MEMORY) {
-    // Set external caught exception to false.
-    ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
-                                      isolate());
-    mov(r0, Operand(false, RelocInfo::NONE));
-    mov(r2, Operand(external_caught));
-    str(r0, MemOperand(r2));
-
-    // Set pending exception and r0 to out of memory exception.
-    Failure* out_of_memory = Failure::OutOfMemoryException();
-    mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
-    mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      isolate())));
-    str(r0, MemOperand(r2));
-  } else if (!value.is(r0)) {
+  if (!value.is(r0)) {
    mov(r0, value);
  }

  // Drop the stack pointer to the top of the top stack handler.
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
  ldr(sp, MemOperand(r3));
@@ -3680,6 +3664,52 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
}
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Label next;
// Preload a couple of values used in the loop.
Register empty_fixed_array_value = r6;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Register empty_descriptor_array_value = r7;
LoadRoot(empty_descriptor_array_value,
Heap::kEmptyDescriptorArrayRootIndex);
mov(r1, r0);
bind(&next);
// Check that there are no elements. Register r1 contains the
// current JS object we've reached through the prototype chain.
ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
cmp(r2, empty_fixed_array_value);
b(ne, call_runtime);
// Check that instance descriptors are not empty so that we can
// check for an enum cache. Leave the map in r2 for the subsequent
// prototype load.
ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
JumpIfSmi(r3, call_runtime);
// Check that there is an enum cache in the non-empty instance
// descriptors (r3). This is the case if the next enumeration
// index field does not contain a smi.
ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
JumpIfSmi(r3, call_runtime);
// For all objects but the receiver, check that the cache is empty.
Label check_prototype;
cmp(r1, r0);
b(eq, &check_prototype);
ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
cmp(r3, empty_fixed_array_value);
b(ne, call_runtime);
// Load the prototype from the map and loop if non-null.
bind(&check_prototype);
ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
cmp(r1, null_value);
b(ne, &next);
}
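CheckEnumCache only succeeds when the whole prototype chain is enumeration-friendly: every object must have empty elements and a usable enum cache, and every object except the receiver must have an empty cache. A rough JavaScript model of the walk (the helper predicates are invented for illustration):

  function canUseEnumCache(receiver) {
    for (var o = receiver; o !== null; o = Object.getPrototypeOf(o)) {
      if (hasElements(o)) return false;            // b(ne, call_runtime)
      if (!hasEnumCache(o)) return false;          // JumpIfSmi(..., call_runtime)
      if (o !== receiver && enumCacheNotEmpty(o)) return false;
    }
    return true;  // safe to reuse the receiver's cached enumeration keys
  }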
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
  if (r1.is(r2)) return true;
  if (r1.is(r3)) return true;

8
deps/v8/src/arm/macro-assembler-arm.h

@@ -588,12 +588,12 @@ class MacroAssembler: public Assembler {
  // Must preserve the result register.
  void PopTryHandler();

-  // Passes thrown value (in r0) to the handler of top of the try handler chain.
+  // Passes thrown value to the handler of top of the try handler chain.
  void Throw(Register value);

  // Propagates an uncatchable exception to the top of the current JS stack's
  // handler chain.
-  void ThrowUncatchable(UncatchableExceptionType type, Register value);
+  void ThrowUncatchable(Register value);

  // ---------------------------------------------------------------------------
  // Inline caching support

@@ -1259,6 +1259,10 @@ class MacroAssembler: public Assembler {
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

+  // Expects object in r0 and returns map with validated enum cache
+  // in r0. Assumes that any other register can be used as a scratch.
+  void CheckEnumCache(Register null_value, Label* call_runtime);
+
 private:
  void CallCFunctionHelper(Register function,
                           int num_reg_arguments,

4
deps/v8/src/arm/simulator-arm.cc

@@ -1277,9 +1277,9 @@ void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {

// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit() const {
-  // Leave a safety margin of 512 bytes to prevent overrunning the stack when
+  // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
  // pushing values.
-  return reinterpret_cast<uintptr_t>(stack_) + 512;
+  return reinterpret_cast<uintptr_t>(stack_) + 1024;
}
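The simulator reports its stack limit a safety margin above the real end of the allocation, so pushes that happen between limit checks cannot run off the stack; this change doubles that margin. The idea in a toy JavaScript sketch (all names invented; the real simulator stack grows downward toward stack_):

  var kSafetyMargin = 1024;  // was 512
  function stackLimit(stackBase) {
    // Code may grow the stack down to this address; the margin below it
    // absorbs values pushed before the next overflow check fires.
    return stackBase + kSafetyMargin;
  }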

186
deps/v8/src/arm/stub-cache-arm.cc

@@ -3076,7 +3076,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
  ElementsKind elements_kind = receiver_map->elements_kind();
  bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
  Handle<Code> stub =
-      KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+      KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();

  __ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
@@ -4121,7 +4121,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
void KeyedStoreStubCompiler::GenerateStoreFastElement(
    MacroAssembler* masm,
    bool is_js_array,
-    ElementsKind elements_kind) {
+    ElementsKind elements_kind,
+    KeyedAccessGrowMode grow_mode) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
@@ -4130,13 +4131,16 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
  //  -- r3    : scratch
  //  -- r4    : scratch (elements)
  // -----------------------------------
-  Label miss_force_generic, transition_elements_kind;
+  Label miss_force_generic, transition_elements_kind, grow, slow;
+  Label finish_store, check_capacity;

  Register value_reg = r0;
  Register key_reg = r1;
  Register receiver_reg = r2;
-  Register scratch = r3;
-  Register elements_reg = r4;
+  Register scratch = r4;
+  Register elements_reg = r3;
+  Register length_reg = r5;
+  Register scratch2 = r6;

  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.
@@ -4144,16 +4148,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
  // Check that the key is a smi.
  __ JumpIfNotSmi(key_reg, &miss_force_generic);

-  // Get the elements array and make sure it is a fast element array, not 'cow'.
-  __ ldr(elements_reg,
-         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-  __ CheckMap(elements_reg,
-              scratch,
-              Heap::kFixedArrayMapRootIndex,
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+  }

  // Check that the key is within bounds.
+  __ ldr(elements_reg,
+         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
  if (is_js_array) {
    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
  } else {
@@ -4161,10 +4162,21 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
  }
  // Compare smis.
  __ cmp(key_reg, scratch);
-  __ b(hs, &miss_force_generic);
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    __ b(hs, &grow);
+  } else {
+    __ b(hs, &miss_force_generic);
+  }
+
+  // Make sure elements is a fast element array, not 'cow'.
+  __ CheckMap(elements_reg,
+              scratch,
+              Heap::kFixedArrayMapRootIndex,
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  __ bind(&finish_store);

  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
-    __ JumpIfNotSmi(value_reg, &transition_elements_kind);
    __ add(scratch,
           elements_reg,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4202,12 +4214,80 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
  __ bind(&transition_elements_kind);
  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
// Grow the array by a single element if possible.
__ bind(&grow);
    // Make sure the array is only growing by a single element; anything else
    // must be handled by the runtime. Flags were already set by the compare above.
__ b(ne, &miss_force_generic);
// Check for the empty array, and preallocate a small backing store if
// possible.
__ ldr(length_reg,
FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
__ b(ne, &check_capacity);
int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
__ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
TAG_OBJECT);
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
__ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
__ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
__ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
}
// Store the element at index zero.
__ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
// Install the new backing store in the JSArray.
__ str(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Increment the length of the array.
__ mov(length_reg, Operand(Smi::FromInt(1)));
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ Ret();
__ bind(&check_capacity);
    // Check for cow elements; in general they are not handled by this stub.
__ CheckMap(elements_reg,
scratch,
Heap::kFixedCOWArrayMapRootIndex,
&miss_force_generic,
DONT_DO_SMI_CHECK);
__ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
__ cmp(length_reg, scratch);
__ b(hs, &slow);
// Grow the array and finish the store.
__ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ jmp(&finish_store);
__ bind(&slow);
Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
__ Jump(ic_slow, RelocInfo::CODE_TARGET);
}
}
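With ALLOW_JSARRAY_GROWTH the stub now handles the common append pattern itself instead of missing to the generic IC: a keyed store exactly one past the current length grows the array by a single element (preallocating a small backing store when the array was empty), while anything further out still leaves the fast path. In JavaScript terms (illustrative; the stub itself is the ARM code above):

  var a = [1, 2, 3];
  a[a.length] = 4;  // grows by exactly one element: stays on the stub's path
  a[100] = 5;       // creates holes: falls through to the generic/slow path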
void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
    MacroAssembler* masm,
-    bool is_js_array) {
+    bool is_js_array,
+    KeyedAccessGrowMode grow_mode) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
@@ -4217,7 +4297,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
  //  -- r4    : scratch
  //  -- r5    : scratch
  // -----------------------------------
-  Label miss_force_generic, transition_elements_kind;
+  Label miss_force_generic, transition_elements_kind, grow, slow;
+  Label finish_store, check_capacity;

  Register value_reg = r0;
  Register key_reg = r1;
@@ -4227,6 +4308,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
  Register scratch2 = r5;
  Register scratch3 = r6;
  Register scratch4 = r7;
+  Register length_reg = r7;

  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.
@@ -4245,8 +4327,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
  // Compare smis, unsigned compare catches both negative and out-of-bound
  // indexes.
  __ cmp(key_reg, scratch1);
-  __ b(hs, &miss_force_generic);
+  if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+    __ b(hs, &grow);
+  } else {
+    __ b(hs, &miss_force_generic);
+  }
+
+  __ bind(&finish_store);

  __ StoreNumberToDoubleElements(value_reg,
                                 key_reg,
                                 receiver_reg,
@@ -4267,6 +4354,73 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
  __ bind(&transition_elements_kind);
  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
// Grow the array by a single element if possible.
__ bind(&grow);
    // Make sure the array is only growing by a single element; anything else
    // must be handled by the runtime. Flags were already set by the compare above.
__ b(ne, &miss_force_generic);
// Transition on values that can't be stored in a FixedDoubleArray.
Label value_is_smi;
__ JumpIfSmi(value_reg, &value_is_smi);
__ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
__ b(ne, &transition_elements_kind);
__ bind(&value_is_smi);
// Check for the empty array, and preallocate a small backing store if
// possible.
__ ldr(length_reg,
FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
__ b(ne, &check_capacity);
int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
__ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
TAG_OBJECT);
    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
    // efficiency; they are guaranteed to be initialized before use.
__ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
__ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ mov(scratch1,
Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
__ str(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
// Install the new backing store in the JSArray.
__ str(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Increment the length of the array.
__ mov(length_reg, Operand(Smi::FromInt(1)));
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ jmp(&finish_store);
__ bind(&check_capacity);
// Make sure that the backing store can hold additional elements.
__ ldr(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
__ cmp(length_reg, scratch1);
__ b(hs, &slow);
// Grow the array and finish the store.
__ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ jmp(&finish_store);
__ bind(&slow);
Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
__ Jump(ic_slow, RelocInfo::CODE_TARGET);
}
}
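The double-array variant adds one extra gate before growing: a value that is neither a smi nor a heap number cannot live in a FixedDoubleArray, so it jumps to transition_elements_kind instead. Conceptually, in JavaScript (the elements-kind transitions are internal and invisible to script):

  var d = [0.5, 1.5];  // double elements
  d[2] = 2.5;          // number: grown in place by the stub
  d[3] = 'x';          // non-number: the backing store must change kind first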

62
deps/v8/src/array.js

@@ -27,7 +27,7 @@

// This file relies on the fact that the following declarations have been made
// in runtime.js:
-// const $Array = global.Array;
+// var $Array = global.Array;

// -------------------------------------------------------------------
@@ -757,7 +757,7 @@ function ArraySort(comparefn) {
  }
  var receiver = %GetDefaultReceiver(comparefn);

-  function InsertionSort(a, from, to) {
+  var InsertionSort = function InsertionSort(a, from, to) {
    for (var i = from + 1; i < to; i++) {
      var element = a[i];
      for (var j = i - 1; j >= from; j--) {
@@ -771,9 +771,9 @@ function ArraySort(comparefn) {
      }
      a[j + 1] = element;
    }
-  }
+  };

-  function QuickSort(a, from, to) {
+  var QuickSort = function QuickSort(a, from, to) {
    // Insertion sort is faster for short arrays.
    if (to - from <= 10) {
      InsertionSort(a, from, to);
@@ -841,12 +841,12 @@ function ArraySort(comparefn) {
    }
    QuickSort(a, from, low_end);
    QuickSort(a, high_start, to);
-  }
+  };

  // Copy elements in the range 0..length from obj's prototype chain
  // to obj itself, if obj has holes. Return one more than the maximal index
  // of a prototype property.
-  function CopyFromPrototype(obj, length) {
+  var CopyFromPrototype = function CopyFromPrototype(obj, length) {
    var max = 0;
    for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
      var indices = %GetArrayKeys(proto, length);
@@ -873,12 +873,12 @@ function ArraySort(comparefn) {
      }
    }
    return max;
-  }
+  };

  // Set a value of "undefined" on all indices in the range from..to
  // where a prototype of obj has an element. I.e., shadow all prototype
  // elements in that range.
-  function ShadowPrototypeElements(obj, from, to) {
+  var ShadowPrototypeElements = function(obj, from, to) {
    for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
      var indices = %GetArrayKeys(proto, to);
      if (indices.length > 0) {
@@ -901,9 +901,9 @@ function ArraySort(comparefn) {
        }
      }
    }
-  }
+  };

-  function SafeRemoveArrayHoles(obj) {
+  var SafeRemoveArrayHoles = function SafeRemoveArrayHoles(obj) {
    // Copy defined elements from the end to fill in all holes and undefineds
    // in the beginning of the array. Write undefineds and holes at the end
    // after loop is finished.
@@ -958,7 +958,7 @@ function ArraySort(comparefn) {

    // Return the number of defined elements.
    return first_undefined;
-  }
+  };

  var length = TO_UINT32(this.length);
  if (length < 2) return this;
@@ -1024,10 +1024,10 @@ function ArrayFilter(f, receiver) {
  var accumulator = new InternalArray();
  var accumulator_length = 0;
  for (var i = 0; i < length; i++) {
-    var current = array[i];
-    if (!IS_UNDEFINED(current) || i in array) {
-      if (%_CallFunction(receiver, current, i, array, f)) {
-        accumulator[accumulator_length++] = current;
+    if (i in array) {
+      var element = array[i];
+      if (%_CallFunction(receiver, element, i, array, f)) {
+        accumulator[accumulator_length++] = element;
      }
    }
  }
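This guard rewrite, repeated in ArrayForEach, ArraySome, ArrayEvery, ArrayMap and the reduce variants below, tests `i in array` before reading the element instead of reading first and testing only when the value came back undefined. Both versions skip true holes and visit stored undefineds; the new order simply matches the spec's HasProperty-then-Get sequence and reads each element only after it is known to exist. For example:

  var a = [undefined, , 2];  // index 1 is a hole
  var visited = [];
  a.forEach(function(v, i) { visited.push(i); });
  // visited is [0, 2]: the stored undefined is visited, the hole is skipped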
@@ -1057,9 +1057,9 @@ function ArrayForEach(f, receiver) {
  }

  for (var i = 0; i < length; i++) {
-    var current = array[i];
-    if (!IS_UNDEFINED(current) || i in array) {
-      %_CallFunction(receiver, current, i, array, f);
+    if (i in array) {
+      var element = array[i];
+      %_CallFunction(receiver, element, i, array, f);
    }
  }
}
@@ -1088,9 +1088,9 @@ function ArraySome(f, receiver) {
  }

  for (var i = 0; i < length; i++) {
-    var current = array[i];
-    if (!IS_UNDEFINED(current) || i in array) {
-      if (%_CallFunction(receiver, current, i, array, f)) return true;
+    if (i in array) {
+      var element = array[i];
+      if (%_CallFunction(receiver, element, i, array, f)) return true;
    }
  }
  return false;
@@ -1118,9 +1118,9 @@ function ArrayEvery(f, receiver) {
  }

  for (var i = 0; i < length; i++) {
-    var current = array[i];
-    if (!IS_UNDEFINED(current) || i in array) {
-      if (!%_CallFunction(receiver, current, i, array, f)) return false;
+    if (i in array) {
+      var element = array[i];
+      if (!%_CallFunction(receiver, element, i, array, f)) return false;
    }
  }
  return true;
@@ -1149,9 +1149,9 @@ function ArrayMap(f, receiver) {
  var result = new $Array();
  var accumulator = new InternalArray(length);
  for (var i = 0; i < length; i++) {
-    var current = array[i];
-    if (!IS_UNDEFINED(current) || i in array) {
-      accumulator[i] = %_CallFunction(receiver, current, i, array, f);
+    if (i in array) {
+      var element = array[i];
+      accumulator[i] = %_CallFunction(receiver, element, i, array, f);
    }
  }
  %MoveArrayContents(accumulator, result);
@@ -1308,8 +1308,8 @@ function ArrayReduce(callback, current) {
  var receiver = %GetDefaultReceiver(callback);

  for (; i < length; i++) {
-    var element = array[i];
-    if (!IS_UNDEFINED(element) || i in array) {
+    if (i in array) {
+      var element = array[i];
      current = %_CallFunction(receiver, current, element, i, array, callback);
    }
  }
@@ -1345,8 +1345,8 @@ function ArrayReduceRight(callback, current) {
  var receiver = %GetDefaultReceiver(callback);

  for (; i >= 0; i--) {
-    var element = array[i];
-    if (!IS_UNDEFINED(element) || i in array) {
+    if (i in array) {
+      var element = array[i];
      current = %_CallFunction(receiver, current, element, i, array, callback);
    }
  }
@@ -1373,7 +1373,7 @@ function SetUpArray() {
  var specialFunctions = %SpecialArrayFunctions({});

-  function getFunction(name, jsBuiltin, len) {
+  var getFunction = function(name, jsBuiltin, len) {
    var f = jsBuiltin;
    if (specialFunctions.hasOwnProperty(name)) {
      f = specialFunctions[name];
@@ -1382,7 +1382,7 @@ function SetUpArray() {
      %FunctionSetLength(f, len);
    }
    return f;
-  }
+  };

  // Set up non-enumerable functions of the Array.prototype object and
  // set their names.

22
deps/v8/src/ast.cc

@@ -1009,6 +1009,7 @@ INCREASE_NODE_COUNT(BreakStatement)
INCREASE_NODE_COUNT(ReturnStatement)
INCREASE_NODE_COUNT(Conditional)
INCREASE_NODE_COUNT(Literal)
+INCREASE_NODE_COUNT(ObjectLiteral)
INCREASE_NODE_COUNT(Assignment)
INCREASE_NODE_COUNT(Throw)
INCREASE_NODE_COUNT(Property)
@@ -1017,6 +1018,8 @@ INCREASE_NODE_COUNT(CountOperation)
INCREASE_NODE_COUNT(BinaryOperation)
INCREASE_NODE_COUNT(CompareOperation)
INCREASE_NODE_COUNT(ThisFunction)
+INCREASE_NODE_COUNT(Call)
+INCREASE_NODE_COUNT(CallNew)

#undef INCREASE_NODE_COUNT
@@ -1112,33 +1115,14 @@ void AstConstructionVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
}

-void AstConstructionVisitor::VisitObjectLiteral(ObjectLiteral* node) {
-  increase_node_count();
-  add_flag(kDontInline);  // TODO(1322): Allow materialized literals.
-}
-
void AstConstructionVisitor::VisitArrayLiteral(ArrayLiteral* node) {
  increase_node_count();
  add_flag(kDontInline);  // TODO(1322): Allow materialized literals.
}

-void AstConstructionVisitor::VisitCall(Call* node) {
-  increase_node_count();
-  add_flag(kDontSelfOptimize);
-}
-
-void AstConstructionVisitor::VisitCallNew(CallNew* node) {
-  increase_node_count();
-  add_flag(kDontSelfOptimize);
-}
-
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
  increase_node_count();
-  add_flag(kDontSelfOptimize);
  if (node->is_jsruntime()) {
    // Don't try to inline JS runtime calls because we don't (currently) even
    // optimize them.

72
deps/v8/src/ast.h

@@ -154,7 +154,7 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;

#define DECLARE_NODE_TYPE(type) \
  virtual void Accept(AstVisitor* v); \
-  virtual AstNode::Type node_type() const { return AstNode::k##type; } \
+  virtual AstNode::Type node_type() const { return AstNode::k##type; }

enum AstPropertiesFlag {
@@ -223,8 +223,6 @@ class AstNode: public ZoneObject {
  virtual IterationStatement* AsIterationStatement() { return NULL; }
  virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }

-  static void ResetIds() { Isolate::Current()->set_ast_node_id(0); }
-
 protected:
  static int GetNextId(Isolate* isolate) {
    return ReserveIdRange(isolate, 1);
@@ -245,11 +243,6 @@ class AstNode: public ZoneObject {
};

-#define DECLARE_NODE_TYPE(type) \
-  virtual void Accept(AstVisitor* v); \
-  virtual AstNode::Type node_type() const { return AstNode::k##type; } \
-
class Statement: public AstNode {
 public:
  Statement() : statement_pos_(RelocInfo::kNoPosition) {}
@@ -555,17 +548,17 @@ class ModuleVariable: public Module {
 public:
  DECLARE_NODE_TYPE(ModuleVariable)

-  Variable* var() const { return var_; }
+  VariableProxy* proxy() const { return proxy_; }

 protected:
  template<class> friend class AstNodeFactory;

-  explicit ModuleVariable(Variable* var)
-      : var_(var) {
+  explicit ModuleVariable(VariableProxy* proxy)
+      : proxy_(proxy) {
  }

 private:
-  Variable* var_;
+  VariableProxy* proxy_;
};
@@ -793,10 +786,10 @@ class ForInStatement: public IterationStatement {
  Expression* each() const { return each_; }
  Expression* enumerable() const { return enumerable_; }

-  // Bailout support.
-  int AssignmentId() const { return assignment_id_; }
  virtual int ContinueId() const { return EntryId(); }
-  virtual int StackCheckId() const { return EntryId(); }
+  virtual int StackCheckId() const { return body_id_; }
+  int BodyId() const { return body_id_; }
+  int PrepareId() const { return prepare_id_; }

 protected:
  template<class> friend class AstNodeFactory;
@@ -805,13 +798,15 @@ class ForInStatement: public IterationStatement {
      : IterationStatement(isolate, labels),
        each_(NULL),
        enumerable_(NULL),
-        assignment_id_(GetNextId(isolate)) {
+        body_id_(GetNextId(isolate)),
+        prepare_id_(GetNextId(isolate)) {
  }

 private:
  Expression* each_;
  Expression* enumerable_;
-  int assignment_id_;
+  int body_id_;
+  int prepare_id_;
};
@@ -1910,6 +1905,16 @@ class FunctionLiteral: public Expression {
    DECLARATION
  };

+  enum ParameterFlag {
+    kNoDuplicateParameters = 0,
+    kHasDuplicateParameters = 1
+  };
+
+  enum IsFunctionFlag {
+    kGlobalOrEval,
+    kIsFunction
+  };
+
  DECLARE_NODE_TYPE(FunctionLiteral)

  Handle<String> name() const { return name_; }
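Replacing the constructor's raw bools with ParameterFlag and IsFunctionFlag makes call sites self-documenting. The same refactoring pattern in a JavaScript sketch (the stub factory below is invented for illustration):

  var ParameterFlag = { kNoDuplicateParameters: 0, kHasDuplicateParameters: 1 };
  var IsFunctionFlag = { kGlobalOrEval: 0, kIsFunction: 1 };

  function newFunctionLiteral(name, dupParams, isFunction) {  // stub factory
    return { name: name, dupParams: dupParams, isFunction: isFunction };
  }

  // Before: newFunctionLiteral('f', false, true) -- which bool is which?
  newFunctionLiteral('f', ParameterFlag.kNoDuplicateParameters,
                     IsFunctionFlag.kIsFunction);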
@@ -1919,6 +1924,7 @@ class FunctionLiteral: public Expression {
  int function_token_position() const { return function_token_position_; }
  int start_position() const;
  int end_position() const;
+  int SourceSize() const { return end_position() - start_position(); }
  bool is_expression() const { return IsExpression::decode(bitfield_); }
  bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
  bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
@@ -1954,6 +1960,8 @@ class FunctionLiteral: public Expression {
    return HasDuplicateParameters::decode(bitfield_);
  }

+  bool is_function() { return IsFunction::decode(bitfield_) == kIsFunction; }
+
  int ast_node_count() { return ast_properties_.node_count(); }
  AstProperties::Flags* flags() { return ast_properties_.flags(); }
  void set_ast_properties(AstProperties* ast_properties) {
@@ -1974,7 +1982,8 @@ class FunctionLiteral: public Expression {
                  Handle<FixedArray> this_property_assignments,
                  int parameter_count,
                  Type type,
-                  bool has_duplicate_parameters)
+                  ParameterFlag has_duplicate_parameters,
+                  IsFunctionFlag is_function)
      : Expression(isolate),
        name_(name),
        scope_(scope),
@@ -1992,7 +2001,8 @@ class FunctionLiteral: public Expression {
          IsExpression::encode(type != DECLARATION) |
          IsAnonymous::encode(type == ANONYMOUS_EXPRESSION) |
          Pretenure::encode(false) |
-          HasDuplicateParameters::encode(has_duplicate_parameters);
+          HasDuplicateParameters::encode(has_duplicate_parameters) |
+          IsFunction::encode(is_function);
  }

 private:
@@ -2014,7 +2024,8 @@ class FunctionLiteral: public Expression {
  class IsExpression: public BitField<bool, 1, 1> {};
  class IsAnonymous: public BitField<bool, 2, 1> {};
  class Pretenure: public BitField<bool, 3, 1> {};
-  class HasDuplicateParameters: public BitField<bool, 4, 1> {};
+  class HasDuplicateParameters: public BitField<ParameterFlag, 4, 1> {};
+  class IsFunction: public BitField<IsFunctionFlag, 5, 1> {};
};
@@ -2050,6 +2061,8 @@ class ThisFunction: public Expression {
  explicit ThisFunction(Isolate* isolate): Expression(isolate) {}
};

+#undef DECLARE_NODE_TYPE
+
// ----------------------------------------------------------------------------
// Regular expressions
@@ -2525,19 +2538,19 @@ class AstNodeFactory BASE_EMBEDDED {
    VISIT_AND_RETURN(ModuleLiteral, module)
  }

-  ModuleVariable* NewModuleVariable(Variable* var) {
-    ModuleVariable* module = new(zone_) ModuleVariable(var);
-    VISIT_AND_RETURN(ModuleLiteral, module)
+  ModuleVariable* NewModuleVariable(VariableProxy* proxy) {
+    ModuleVariable* module = new(zone_) ModuleVariable(proxy);
+    VISIT_AND_RETURN(ModuleVariable, module)
  }

  ModulePath* NewModulePath(Module* origin, Handle<String> name) {
    ModulePath* module = new(zone_) ModulePath(origin, name);
-    VISIT_AND_RETURN(ModuleLiteral, module)
+    VISIT_AND_RETURN(ModulePath, module)
  }

  ModuleUrl* NewModuleUrl(Handle<String> url) {
    ModuleUrl* module = new(zone_) ModuleUrl(url);
-    VISIT_AND_RETURN(ModuleLiteral, module)
+    VISIT_AND_RETURN(ModuleUrl, module)
  }

  Block* NewBlock(ZoneStringList* labels,
bool has_only_simple_this_property_assignments, bool has_only_simple_this_property_assignments,
Handle<FixedArray> this_property_assignments, Handle<FixedArray> this_property_assignments,
int parameter_count, int parameter_count,
bool has_duplicate_parameters, FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::Type type, FunctionLiteral::Type type,
bool visit_with_visitor) { FunctionLiteral::IsFunctionFlag is_function) {
FunctionLiteral* lit = new(zone_) FunctionLiteral( FunctionLiteral* lit = new(zone_) FunctionLiteral(
isolate_, name, scope, body, isolate_, name, scope, body,
materialized_literal_count, expected_property_count, handler_count, materialized_literal_count, expected_property_count, handler_count,
has_only_simple_this_property_assignments, this_property_assignments, has_only_simple_this_property_assignments, this_property_assignments,
parameter_count, type, has_duplicate_parameters); parameter_count, type, has_duplicate_parameters, is_function);
if (visit_with_visitor) { // Top-level literal doesn't count for the AST's properties.
if (is_function == FunctionLiteral::kIsFunction) {
visitor_.VisitFunctionLiteral(lit); visitor_.VisitFunctionLiteral(lit);
} }
return lit; return lit;

161
deps/v8/src/bootstrapper.cc

@@ -172,6 +172,10 @@ class Genesis BASE_EMBEDDED {
  Handle<JSFunction> GetThrowTypeErrorFunction();

  void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
+
+  // Make the "arguments" and "caller" properties throw a TypeError on access.
+  void PoisonArgumentsAndCaller(Handle<Map> map);
+
  // Creates the global objects using the global and the template passed in
  // through the API. We call this regardless of whether we are building a
  // context from scratch or using a deserialized one from the partial snapshot
@@ -192,7 +196,7 @@ class Genesis BASE_EMBEDDED {
  // detached from the other objects in the snapshot.
  void HookUpInnerGlobal(Handle<GlobalObject> inner_global);
  // New context initialization. Used for creating a context from scratch.
-  void InitializeGlobal(Handle<GlobalObject> inner_global,
+  bool InitializeGlobal(Handle<GlobalObject> inner_global,
                        Handle<JSFunction> empty_function);
  void InitializeExperimentalGlobal();
  // Installs the contents of the native .js files on the global objects.
@@ -256,14 +260,10 @@ class Genesis BASE_EMBEDDED {

  Handle<Map> CreateStrictModeFunctionMap(
      PrototypePropertyMode prototype_mode,
-      Handle<JSFunction> empty_function,
-      Handle<AccessorPair> arguments_callbacks,
-      Handle<AccessorPair> caller_callbacks);
+      Handle<JSFunction> empty_function);

  Handle<DescriptorArray> ComputeStrictFunctionInstanceDescriptor(
-      PrototypePropertyMode propertyMode,
-      Handle<AccessorPair> arguments,
-      Handle<AccessorPair> caller);
+      PrototypePropertyMode propertyMode);

  static bool CompileBuiltin(Isolate* isolate, int index);
  static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
@@ -384,44 +384,40 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,

Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
    PrototypePropertyMode prototypeMode) {
-  Handle<DescriptorArray> descriptors =
-      factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
-                                    ? 4
-                                    : 5);
-  PropertyAttributes attributes =
-      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+  int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
+  Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size));
+  PropertyAttributes attribs = static_cast<PropertyAttributes>(
+      DONT_ENUM | DONT_DELETE | READ_ONLY);

  DescriptorArray::WhitenessWitness witness(*descriptors);

  {  // Add length.
-    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
-    CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionLength));
+    CallbacksDescriptor d(*factory()->length_symbol(), *f, attribs);
    descriptors->Set(0, &d, witness);
  }
  {  // Add name.
-    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
-    CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionName));
+    CallbacksDescriptor d(*factory()->name_symbol(), *f, attribs);
    descriptors->Set(1, &d, witness);
  }
  {  // Add arguments.
-    Handle<Foreign> foreign =
-        factory()->NewForeign(&Accessors::FunctionArguments);
-    CallbacksDescriptor d(*factory()->arguments_symbol(), *foreign, attributes);
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionArguments));
+    CallbacksDescriptor d(*factory()->arguments_symbol(), *f, attribs);
    descriptors->Set(2, &d, witness);
  }
  {  // Add caller.
-    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionCaller);
-    CallbacksDescriptor d(*factory()->caller_symbol(), *foreign, attributes);
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionCaller));
+    CallbacksDescriptor d(*factory()->caller_symbol(), *f, attribs);
    descriptors->Set(3, &d, witness);
  }
  if (prototypeMode != DONT_ADD_PROTOTYPE) {
    // Add prototype.
    if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
-      attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
+      attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY);
    }
-    Handle<Foreign> foreign =
-        factory()->NewForeign(&Accessors::FunctionPrototype);
-    CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionPrototype));
+    CallbacksDescriptor d(*factory()->prototype_symbol(), *f, attribs);
    descriptors->Set(4, &d, witness);
  }
  descriptors->Sort(witness);
@@ -532,47 +528,42 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {

Handle<DescriptorArray> Genesis::ComputeStrictFunctionInstanceDescriptor(
-    PrototypePropertyMode prototypeMode,
-    Handle<AccessorPair> arguments,
-    Handle<AccessorPair> caller) {
-  Handle<DescriptorArray> descriptors =
-      factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
-                                    ? 4
-                                    : 5);
-  PropertyAttributes attributes = static_cast<PropertyAttributes>(
+    PrototypePropertyMode prototypeMode) {
+  int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
+  Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size));
+  PropertyAttributes attribs = static_cast<PropertyAttributes>(
      DONT_ENUM | DONT_DELETE);

  DescriptorArray::WhitenessWitness witness(*descriptors);

-  {  // length
-    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
-    CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
+  {  // Add length.
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionLength));
+    CallbacksDescriptor d(*factory()->length_symbol(), *f, attribs);
    descriptors->Set(0, &d, witness);
  }
-  {  // name
-    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
-    CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
+  {  // Add name.
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionName));
+    CallbacksDescriptor d(*factory()->name_symbol(), *f, attribs);
    descriptors->Set(1, &d, witness);
  }
-  {  // arguments
-    CallbacksDescriptor d(*factory()->arguments_symbol(),
-                          *arguments,
-                          attributes);
+  {  // Add arguments.
+    Handle<AccessorPair> arguments(factory()->NewAccessorPair());
+    CallbacksDescriptor d(*factory()->arguments_symbol(), *arguments, attribs);
    descriptors->Set(2, &d, witness);
  }
-  {  // caller
-    CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attributes);
+  {  // Add caller.
+    Handle<AccessorPair> caller(factory()->NewAccessorPair());
+    CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs);
    descriptors->Set(3, &d, witness);
  }

-  // prototype
  if (prototypeMode != DONT_ADD_PROTOTYPE) {
+    // Add prototype.
    if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) {
-      attributes = static_cast<PropertyAttributes>(attributes | READ_ONLY);
+      attribs = static_cast<PropertyAttributes>(attribs | READ_ONLY);
    }
-    Handle<Foreign> foreign =
-        factory()->NewForeign(&Accessors::FunctionPrototype);
-    CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
+    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionPrototype));
+    CallbacksDescriptor d(*factory()->prototype_symbol(), *f, attribs);
    descriptors->Set(4, &d, witness);
  }
@@ -603,14 +594,10 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {

Handle<Map> Genesis::CreateStrictModeFunctionMap(
    PrototypePropertyMode prototype_mode,
-    Handle<JSFunction> empty_function,
-    Handle<AccessorPair> arguments_callbacks,
-    Handle<AccessorPair> caller_callbacks) {
+    Handle<JSFunction> empty_function) {
  Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
  Handle<DescriptorArray> descriptors =
-      ComputeStrictFunctionInstanceDescriptor(prototype_mode,
-                                              arguments_callbacks,
-                                              caller_callbacks);
+      ComputeStrictFunctionInstanceDescriptor(prototype_mode);
  map->set_instance_descriptors(*descriptors);
  map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
  map->set_prototype(*empty_function);
@@ -619,23 +606,15 @@ Handle<Map> Genesis::CreateStrictModeFunctionMap(

void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
-  // Create the callbacks arrays for ThrowTypeError functions.
-  // The get/set callacks are filled in after the maps are created below.
-  Factory* factory = empty->GetIsolate()->factory();
-  Handle<AccessorPair> arguments(factory->NewAccessorPair());
-  Handle<AccessorPair> caller(factory->NewAccessorPair());
-
  // Allocate map for the strict mode function instances.
  Handle<Map> strict_mode_function_instance_map =
-      CreateStrictModeFunctionMap(
-          ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller);
+      CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
  global_context()->set_strict_mode_function_instance_map(
      *strict_mode_function_instance_map);

  // Allocate map for the prototype-less strict mode instances.
  Handle<Map> strict_mode_function_without_prototype_map =
-      CreateStrictModeFunctionMap(
-          DONT_ADD_PROTOTYPE, empty, arguments, caller);
+      CreateStrictModeFunctionMap(DONT_ADD_PROTOTYPE, empty);
  global_context()->set_strict_mode_function_without_prototype_map(
      *strict_mode_function_without_prototype_map);

@@ -643,26 +622,38 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
  // only for processing of builtins.
  // Later the map is replaced with writable prototype map, allocated below.
  Handle<Map> strict_mode_function_map =
-      CreateStrictModeFunctionMap(
-          ADD_READONLY_PROTOTYPE, empty, arguments, caller);
+      CreateStrictModeFunctionMap(ADD_READONLY_PROTOTYPE, empty);
  global_context()->set_strict_mode_function_map(
      *strict_mode_function_map);

  // The final map for the strict mode functions. Writeable prototype.
  // This map is installed in MakeFunctionInstancePrototypeWritable.
  strict_mode_function_instance_map_writable_prototype_ =
-      CreateStrictModeFunctionMap(
-          ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller);
-
-  // Create the ThrowTypeError function instance.
-  Handle<JSFunction> throw_function =
-      GetThrowTypeErrorFunction();
+      CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);

  // Complete the callbacks.
-  arguments->set_getter(*throw_function);
-  arguments->set_setter(*throw_function);
-  caller->set_getter(*throw_function);
-  caller->set_setter(*throw_function);
+  PoisonArgumentsAndCaller(strict_mode_function_instance_map);
+  PoisonArgumentsAndCaller(strict_mode_function_without_prototype_map);
+  PoisonArgumentsAndCaller(strict_mode_function_map);
+  PoisonArgumentsAndCaller(
+      strict_mode_function_instance_map_writable_prototype_);
+}
+
+
+static void SetAccessors(Handle<Map> map,
+                         Handle<String> name,
+                         Handle<JSFunction> func) {
+  DescriptorArray* descs = map->instance_descriptors();
+  int number = descs->Search(*name);
+  AccessorPair* accessors = AccessorPair::cast(descs->GetValue(number));
+  accessors->set_getter(*func);
+  accessors->set_setter(*func);
+}
+
+
+void Genesis::PoisonArgumentsAndCaller(Handle<Map> map) {
+  SetAccessors(map, factory()->arguments_symbol(), GetThrowTypeErrorFunction());
+  SetAccessors(map, factory()->caller_symbol(), GetThrowTypeErrorFunction());
}
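PoisonArgumentsAndCaller wires the ThrowTypeError function into the strict-mode function maps, which is what makes these accesses throw at the language level (observable in any ES5 engine of this vintage):

  'use strict';
  function f() { return 1; }
  try {
    f.caller;  // poisoned accessor; f.arguments behaves the same way
  } catch (e) {
    // TypeError: 'caller' and 'arguments' are restricted on strict functions
  }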
@@ -837,7 +828,7 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {

// This is only called if we are not using snapshots. The equivalent
// work in the snapshot case is done in HookUpInnerGlobal.
-void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
+bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
                               Handle<JSFunction> empty_function) {
  // --- G l o b a l  C o n t e x t ---
  // Use the empty function as closure (no scope info).
@@ -1041,7 +1032,10 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
    Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
    Handle<JSFunction> cons = factory->NewFunction(name,
                                                   factory->the_hole_value());
-    cons->SetInstancePrototype(global_context()->initial_object_prototype());
+    { MaybeObject* result = cons->SetInstancePrototype(
+        global_context()->initial_object_prototype());
+      if (result->IsFailure()) return false;
+    }
    cons->SetInstanceClassName(*name);
    Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
    ASSERT(json_object->IsJSObject());
@@ -1252,6 +1246,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
    global_context()->set_random_seed(*zeroed_byte_array);
    memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
  }
+  return true;
}
@@ -1743,7 +1738,9 @@ bool Genesis::InstallNatives() {
    Handle<DescriptorArray> array_descriptors(
        array_function->initial_map()->instance_descriptors());
    int index = array_descriptors->SearchWithCache(heap()->length_symbol());
-    reresult_descriptors->CopyFrom(0, *array_descriptors, index, witness);
+    MaybeObject* copy_result =
+        reresult_descriptors->CopyFrom(0, *array_descriptors, index, witness);
+    if (copy_result->IsFailure()) return false;

    int enum_index = 0;
    {
@@ -2321,7 +2318,7 @@ Genesis::Genesis(Isolate* isolate,
  Handle<JSGlobalProxy> global_proxy =
      CreateNewGlobals(global_template, global_object, &inner_global);
  HookUpGlobalProxy(inner_global, global_proxy);
-  InitializeGlobal(inner_global, empty_function);
+  if (!InitializeGlobal(inner_global, empty_function)) return;
  InstallJSFunctionResultCaches();
  InitializeNormalizedMapCaches();
  if (!InstallNatives()) return;

12
deps/v8/src/builtins.cc

@@ -206,8 +206,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
       }
     } else {
       // Allocate the JS Array
-      MaybeObject* maybe_obj =
-          heap->AllocateEmptyJSArray(FAST_SMI_ONLY_ELEMENTS);
+      MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
       if (!maybe_obj->To(&array)) return maybe_obj;
     }
@@ -218,12 +217,13 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
     if (obj->IsSmi()) {
       int len = Smi::cast(obj)->value();
       if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
-        Object* obj;
+        Object* fixed_array;
         { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
-          if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+          if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
         }
-        MaybeObject* maybe_obj = array->SetContent(FixedArray::cast(obj));
-        if (maybe_obj->IsFailure()) return maybe_obj;
+        // We do not use SetContent to skip the unnecessary elements type check.
+        array->set_elements(FixedArray::cast(fixed_array));
+        array->set_length(Smi::cast(obj));
         return array;
       }
     }
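
The MaybeObject* checks above are the allocation-failure protocol that this patch threads through bootstrapper.cc, elements.cc, and here: every raw allocation may fail, and the failure sentinel must be propagated so a caller can GC and retry. A self-contained sketch of the idiom (plain C++, not V8 code; the types and AllocateFixedArrayWithHoles below are simplified stand-ins):

    #include <cstdio>

    // Simplified stand-ins for V8's Object/MaybeObject; a null value
    // encodes a retry-after-GC allocation failure.
    struct Object {};

    struct MaybeObject {
      Object* value;
      bool ToObject(Object** out) {
        if (value == nullptr) return false;
        *out = value;
        return true;
      }
    };

    static Object the_object;

    // Pretend large requests fail and would need a GC first.
    static MaybeObject AllocateFixedArrayWithHoles(int len) {
      MaybeObject result = { len < 1000 ? &the_object : nullptr };
      return result;
    }

    static MaybeObject BuildArray(int len) {
      Object* fixed_array;
      { MaybeObject maybe_obj = AllocateFixedArrayWithHoles(len);
        // On failure, return the failure itself so the caller can retry.
        if (!maybe_obj.ToObject(&fixed_array)) return maybe_obj;
      }
      MaybeObject ok = { fixed_array };
      return ok;
    }

    int main() {
      std::printf("small: %s\n", BuildArray(10).value ? "ok" : "failed");
      std::printf("large: %s\n", BuildArray(5000).value ? "ok" : "failed");
      return 0;
    }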

2
deps/v8/src/char-predicates.h

@@ -57,6 +57,8 @@ struct IdentifierPart {
   static inline bool Is(uc32 c) {
     return IdentifierStart::Is(c)
         || unibrow::Number::Is(c)
+        || c == 0x200C  // U+200C is Zero-Width Non-Joiner.
+        || c == 0x200D  // U+200D is Zero-Width Joiner.
         || unibrow::CombiningMark::Is(c)
         || unibrow::ConnectorPunctuation::Is(c);
   }
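
ZWNJ and ZWJ are the two format-control characters that ES5.1 (section 7.6) admits in IdentifierPart; they matter in scripts such as Persian and Sinhala, where the joiners are semantically significant inside words. A self-contained sketch of just the new clause:

    #include <cassert>

    // Sketch of the new identifier-part rule added above; uc32 is a
    // stand-in for V8's unsigned 32-bit code point type.
    typedef unsigned uc32;

    static bool IsZeroWidthJoinControl(uc32 c) {
      return c == 0x200C ||  // ZERO WIDTH NON-JOINER
             c == 0x200D;    // ZERO WIDTH JOINER
    }

    int main() {
      assert(IsZeroWidthJoinControl(0x200C));
      assert(IsZeroWidthJoinControl(0x200D));
      assert(!IsZeroWidthJoinControl('a'));
      return 0;
    }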

15
deps/v8/src/code-stubs.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -296,12 +296,14 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
     case FAST_SMI_ONLY_ELEMENTS: {
       KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
                                                        is_js_array_,
-                                                       elements_kind_);
+                                                       elements_kind_,
+                                                       grow_mode_);
     }
       break;
     case FAST_DOUBLE_ELEMENTS:
       KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
-                                                             is_js_array_);
+                                                             is_js_array_,
+                                                             grow_mode_);
       break;
     case EXTERNAL_BYTE_ELEMENTS:
     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
@@ -440,10 +442,13 @@ void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
     }
     KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
                                                      is_jsarray_,
-                                                     FAST_ELEMENTS);
+                                                     FAST_ELEMENTS,
+                                                     grow_mode_);
   } else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
     ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
-    KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, is_jsarray_);
+    KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
+                                                           is_jsarray_,
+                                                           grow_mode_);
   } else {
     UNREACHABLE();
   }

48
deps/v8/src/code-stubs.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -55,6 +55,7 @@ namespace internal {
   V(ConvertToDouble)                     \
   V(WriteInt32ToHeapNumber)              \
   V(StackCheck)                          \
+  V(Interrupt)                           \
   V(FastNewClosure)                      \
   V(FastNewContext)                      \
   V(FastNewBlockContext)                 \
@@ -297,6 +298,18 @@ class StackCheckStub : public CodeStub {
 };
+
+class InterruptStub : public CodeStub {
+ public:
+  InterruptStub() { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return Interrupt; }
+  int MinorKey() { return 0; }
+};
+
 class ToNumberStub: public CodeStub {
  public:
   ToNumberStub() { }
@@ -632,9 +645,6 @@ class CEntryStub : public CodeStub {
                     Label* throw_out_of_memory_exception,
                     bool do_gc,
                     bool always_allocate_scope);
-  void GenerateThrowTOS(MacroAssembler* masm);
-  void GenerateThrowUncatchable(MacroAssembler* masm,
-                                UncatchableExceptionType type);
   // Number of pointers/values returned.
   const int result_size_;
@@ -985,20 +995,29 @@ class KeyedLoadElementStub : public CodeStub {
 class KeyedStoreElementStub : public CodeStub {
  public:
   KeyedStoreElementStub(bool is_js_array,
-                        ElementsKind elements_kind)
+                        ElementsKind elements_kind,
+                        KeyedAccessGrowMode grow_mode)
       : is_js_array_(is_js_array),
-        elements_kind_(elements_kind) { }
+        elements_kind_(elements_kind),
+        grow_mode_(grow_mode) { }
   Major MajorKey() { return KeyedStoreElement; }
   int MinorKey() {
-    return (is_js_array_ ? 0 : kElementsKindCount) + elements_kind_;
+    return ElementsKindBits::encode(elements_kind_) |
+        IsJSArrayBits::encode(is_js_array_) |
+        GrowModeBits::encode(grow_mode_);
   }
   void Generate(MacroAssembler* masm);
  private:
+  class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+  class GrowModeBits: public BitField<KeyedAccessGrowMode, 8, 1> {};
+  class IsJSArrayBits: public BitField<bool, 9, 1> {};
+
   bool is_js_array_;
   ElementsKind elements_kind_;
+  KeyedAccessGrowMode grow_mode_;
   DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
 };
@@ -1076,24 +1095,28 @@ class ElementsTransitionAndStoreStub : public CodeStub {
   ElementsTransitionAndStoreStub(ElementsKind from,
                                  ElementsKind to,
                                  bool is_jsarray,
-                                 StrictModeFlag strict_mode)
+                                 StrictModeFlag strict_mode,
+                                 KeyedAccessGrowMode grow_mode)
       : from_(from),
         to_(to),
         is_jsarray_(is_jsarray),
-        strict_mode_(strict_mode) {}
+        strict_mode_(strict_mode),
+        grow_mode_(grow_mode) {}
  private:
   class FromBits: public BitField<ElementsKind, 0, 8> {};
   class ToBits: public BitField<ElementsKind, 8, 8> {};
-  class IsJSArrayBits: public BitField<bool, 16, 8> {};
-  class StrictModeBits: public BitField<StrictModeFlag, 24, 8> {};
+  class IsJSArrayBits: public BitField<bool, 16, 1> {};
+  class StrictModeBits: public BitField<StrictModeFlag, 17, 1> {};
+  class GrowModeBits: public BitField<KeyedAccessGrowMode, 18, 1> {};
   Major MajorKey() { return ElementsTransitionAndStore; }
   int MinorKey() {
     return FromBits::encode(from_) |
         ToBits::encode(to_) |
         IsJSArrayBits::encode(is_jsarray_) |
-        StrictModeBits::encode(strict_mode_);
+        StrictModeBits::encode(strict_mode_) |
+        GrowModeBits::encode(grow_mode_);
   }
   void Generate(MacroAssembler* masm);
@@ -1102,6 +1125,7 @@ class ElementsTransitionAndStoreStub : public CodeStub {
   ElementsKind to_;
   bool is_jsarray_;
   StrictModeFlag strict_mode_;
+  KeyedAccessGrowMode grow_mode_;
   DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
 };
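
Both MinorKey rewrites above move from ad-hoc arithmetic to BitField-packed keys, so each stub variant maps to a unique, mechanically decodable cache key. A self-contained sketch of that packing (the BitField stand-in and the enum values below are simplified; the real definitions live elsewhere in the V8 tree):

    #include <cassert>
    #include <cstdint>

    // Minimal stand-in for V8's BitField template: packs a value of type T
    // into |size| bits starting at bit |shift| of a 32-bit key.
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        assert((static_cast<uint32_t>(value) & ~((1u << size) - 1)) == 0);
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key & kMask) >> shift);
      }
    };

    // Mirrors the new KeyedStoreElementStub::MinorKey layout:
    // bits 0-7 elements kind, bit 8 grow mode, bit 9 is-JS-array.
    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS = 0, FAST_ELEMENTS = 1 };
    enum KeyedAccessGrowMode { DO_NOT_ALLOW_JSARRAY_GROWTH = 0,
                               ALLOW_JSARRAY_GROWTH = 1 };

    typedef BitField<ElementsKind, 0, 8> ElementsKindBits;
    typedef BitField<KeyedAccessGrowMode, 8, 1> GrowModeBits;
    typedef BitField<bool, 9, 1> IsJSArrayBits;

    int main() {
      uint32_t key = ElementsKindBits::encode(FAST_ELEMENTS) |
                     GrowModeBits::encode(ALLOW_JSARRAY_GROWTH) |
                     IsJSArrayBits::encode(true);
      assert(ElementsKindBits::decode(key) == FAST_ELEMENTS);
      assert(GrowModeBits::decode(key) == ALLOW_JSARRAY_GROWTH);
      assert(IsJSArrayBits::decode(key));
      return 0;
    }

Narrowing IsJSArrayBits and StrictModeBits to one bit each is what frees room for GrowModeBits without overflowing the 32-bit minor key.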

7
deps/v8/src/collection.js

@@ -25,10 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"use strict";
+
-const $Set = global.Set;
-const $Map = global.Map;
-const $WeakMap = global.WeakMap;
+var $Set = global.Set;
+var $Map = global.Map;
+var $WeakMap = global.WeakMap;
 //-------------------------------------------------------------------

36
deps/v8/src/compiler.cc

@@ -61,7 +61,7 @@ CompilationInfo::CompilationInfo(Handle<Script> script)
       extension_(NULL),
       pre_parse_data_(NULL),
       osr_ast_id_(AstNode::kNoNumber) {
-  Initialize(NONOPT);
+  Initialize(BASE);
 }
@@ -182,10 +182,8 @@ static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
 static bool MakeCrankshaftCode(CompilationInfo* info) {
   // Test if we can optimize this function when asked to. We can only
   // do this after the scopes are computed.
-  if (!info->AllowOptimize()) {
+  if (!V8::UseCrankshaft()) {
     info->DisableOptimization();
-  } else if (info->IsOptimizable()) {
-    info->EnableDeoptimizationSupport();
   }
   // In case we are not optimizing simply return the code from
@@ -217,8 +215,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
       FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000;
   if (info->shared_info()->opt_count() > kMaxOptCount) {
     info->AbortOptimization();
-    Handle<JSFunction> closure = info->closure();
-    info->shared_info()->DisableOptimization(*closure);
+    info->shared_info()->DisableOptimization();
     // True indicates the compilation pipeline is still going, not
     // necessarily that we optimized the code.
     return true;
@@ -238,8 +235,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
       (info->osr_ast_id() != AstNode::kNoNumber &&
        scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit)) {
     info->AbortOptimization();
-    Handle<JSFunction> closure = info->closure();
-    info->shared_info()->DisableOptimization(*closure);
+    info->shared_info()->DisableOptimization();
     // True indicates the compilation pipeline is still going, not
     // necessarily that we optimized the code.
     return true;
@@ -317,8 +313,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
     if (!builder.inline_bailout()) {
       // Mark the shared code as unoptimizable unless it was an inlined
       // function that bailed out.
-      Handle<JSFunction> closure = info->closure();
-      info->shared_info()->DisableOptimization(*closure);
+      info->shared_info()->DisableOptimization();
     }
     // True indicates the compilation pipeline is still going, not necessarily
     // that we optimized the code.
@@ -502,13 +497,6 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
     // for small sources, odds are that there aren't many functions
     // that would be compiled lazily anyway, so we skip the preparse step
     // in that case too.
-    int flags = kNoParsingFlags;
-    if ((natives == NATIVES_CODE) || FLAG_allow_natives_syntax) {
-      flags |= kAllowNativesSyntax;
-    }
-    if (natives != NATIVES_CODE && FLAG_harmony_scoping) {
-      flags |= EXTENDED_MODE;
-    }
     // Create a script object describing the script to be compiled.
     Handle<Script> script = FACTORY->NewScript(source);
@@ -529,6 +517,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
     info.MarkAsGlobal();
     info.SetExtension(extension);
     info.SetPreParseData(pre_data);
+    if (FLAG_use_strict) info.SetLanguageMode(STRICT_MODE);
     result = MakeFunctionInfo(&info);
     if (extension == NULL && !result.is_null()) {
       compilation_cache->PutScript(source, result);
@@ -573,6 +562,10 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
     info.SetCallingContext(context);
     result = MakeFunctionInfo(&info);
     if (!result.is_null()) {
+      // Explicitly disable optimization for eval code. We're not yet prepared
+      // to handle eval-code in the optimizing compiler.
+      result->DisableOptimization();
+
       // If caller is strict mode, the result must be in strict mode or
       // extended mode as well, but not the other way around. Consider:
       // eval("'use strict'; ...");
@@ -664,11 +657,13 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
       // Check the function has compiled code.
       ASSERT(shared->is_compiled());
       shared->set_code_age(0);
-      shared->set_dont_crankshaft(lit->flags()->Contains(kDontOptimize));
+      shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
      shared->set_dont_inline(lit->flags()->Contains(kDontInline));
       shared->set_ast_node_count(lit->ast_node_count());
-      if (info->AllowOptimize() && !shared->optimization_disabled()) {
+      if (V8::UseCrankshaft() &&
+          !function.is_null() &&
+          !shared->optimization_disabled()) {
         // If we're asked to always optimize, we compile the optimized
         // version of the function right away - unless the debugger is
         // active as it makes no sense to compile optimized code then.
@@ -766,7 +761,8 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
   function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
   function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
   function_info->set_ast_node_count(lit->ast_node_count());
-  function_info->set_dont_crankshaft(lit->flags()->Contains(kDontOptimize));
+  function_info->set_is_function(lit->is_function());
+  function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
   function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
 }

12
deps/v8/src/compiler.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -163,11 +163,6 @@ class CompilationInfo BASE_EMBEDDED {
     flags_ |= SupportsDeoptimization::encode(true);
   }
-  // Determine whether or not we can adaptively optimize.
-  bool AllowOptimize() {
-    return V8::UseCrankshaft() && !closure_.is_null();
-  }
-
   // Determines whether or not to insert a self-optimization header.
   bool ShouldSelfOptimize();
@@ -181,9 +176,8 @@ class CompilationInfo BASE_EMBEDDED {
   // Compilation mode.
   // BASE is generated by the full codegen, optionally prepared for bailouts.
   // OPTIMIZE is optimized code generated by the Hydrogen-based backend.
-  // NONOPT is generated by the full codegen or the classic backend
-  // and is not prepared for recompilation/bailouts. These functions
-  // are never recompiled.
+  // NONOPT is generated by the full codegen and is not prepared for
+  // recompilation/bailouts. These functions are never recompiled.
   enum Mode {
     BASE,
     OPTIMIZE,

4
deps/v8/src/contexts.h

@@ -356,6 +356,10 @@ class Context: public FixedArray {
     Map* map = this->map();
     return map == map->GetHeap()->block_context_map();
   }
+  bool IsModuleContext() {
+    Map* map = this->map();
+    return map == map->GetHeap()->module_context_map();
+  }
   // Tells whether the global context is marked with out of memory.
   inline bool has_out_of_memory();
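
IsModuleContext follows the same pattern as the block-context test above: each context flavour gets a singleton map (the module_context_map one is allocated in heap.cc later in this patch), so the type test is a single pointer compare. A self-contained sketch of the idea (not V8 code):

    // Every heap object points at a Map describing its layout; comparing
    // against a per-flavour singleton map answers "what kind of context
    // is this?" in one pointer comparison.
    struct Map {};

    static Map function_context_map;
    static Map module_context_map;  // the new singleton

    struct Context {
      Map* map;
      bool IsModuleContext() const { return map == &module_context_map; }
    };

    int main() {
      Context c = { &module_context_map };
      return c.IsModuleContext() ? 0 : 1;
    }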

24
deps/v8/src/d8.js

@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"use strict";
+
 String.prototype.startsWith = function (str) {
   if (str.length > this.length) {
     return false;
@@ -76,7 +78,7 @@ function GetCompletions(global, last, full) {
 // Global object holding debugger related constants and state.
-const Debug = {};
+var Debug = {};
 // Debug events which can occour in the V8 JavaScript engine. These originate
@@ -111,7 +113,7 @@ Debug.ScopeType = { Global: 0,
 // Current debug state.
-const kNoFrame = -1;
+var kNoFrame = -1;
 Debug.State = {
   currentFrame: kNoFrame,
   displaySourceStartLine: -1,
@@ -123,8 +125,8 @@ var trace_debug_json = false;  // Tracing all debug json packets?
 var last_cmd_line = '';
 //var lol_is_enabled;  // Set to true in d8.cc if LIVE_OBJECT_LIST is defined.
 var lol_next_dump_index = 0;
-const kDefaultLolLinesToPrintAtATime = 10;
-const kMaxLolLinesToPrintAtATime = 1000;
+var kDefaultLolLinesToPrintAtATime = 10;
+var kMaxLolLinesToPrintAtATime = 1000;
 var repeat_cmd_line = '';
 var is_running = true;
@@ -2629,7 +2631,7 @@ function NumberToJSON_(value) {
 // Mapping of some control characters to avoid the \uXXXX syntax for most
 // commonly used control cahracters.
-const ctrlCharMap_ = {
+var ctrlCharMap_ = {
   '\b': '\\b',
   '\t': '\\t',
   '\n': '\\n',
@@ -2641,12 +2643,12 @@ const ctrlCharMap_ = {
 // Regular expression testing for ", \ and control characters (0x00 - 0x1F).
-const ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]');
+var ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]');
 // Regular expression matching ", \ and control characters (0x00 - 0x1F)
 // globally.
-const ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g');
+var ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g');
 /**
@@ -2688,12 +2690,12 @@ function StringToJSON_(value) {
  * @return {string} JSON formatted Date value
  */
 function DateToISO8601_(value) {
-  function f(n) {
+  var f = function(n) {
     return n < 10 ? '0' + n : n;
-  }
-  function g(n) {
+  };
+  var g = function(n) {
     return n < 10 ? '00' + n : n < 100 ? '0' + n : n;
-  }
+  };
   return builtins.GetUTCFullYearFrom(value) + '-' +
          f(builtins.GetUTCMonthFrom(value) + 1) + '-' +
          f(builtins.GetUTCDateFrom(value)) + 'T' +

10
deps/v8/src/data-flow.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -85,18 +85,18 @@ class BitVector: public ZoneObject {
     friend class BitVector;
   };
-  explicit BitVector(int length)
+  BitVector(int length, Zone* zone)
       : length_(length),
        data_length_(SizeFor(length)),
-        data_(ZONE->NewArray<uint32_t>(data_length_)) {
+        data_(zone->NewArray<uint32_t>(data_length_)) {
     ASSERT(length > 0);
     Clear();
   }
-  BitVector(const BitVector& other)
+  BitVector(const BitVector& other, Zone* zone)
       : length_(other.length()),
        data_length_(SizeFor(length_)),
-        data_(ZONE->NewArray<uint32_t>(data_length_)) {
+        data_(zone->NewArray<uint32_t>(data_length_)) {
     CopyFrom(other);
   }

5
deps/v8/src/date.js

@@ -28,17 +28,16 @@
 // This file relies on the fact that the following declarations have been made
 // in v8natives.js:
-// const $isFinite = GlobalIsFinite;
+// var $isFinite = GlobalIsFinite;
 // -------------------------------------------------------------------
 // This file contains date support implemented in JavaScript.
 // Keep reference to original values of some global properties. This
 // has the added benefit that the code in this file is isolated from
 // changes to these properties.
-const $Date = global.Date;
+var $Date = global.Date;
 // Helper function to throw error.
 function ThrowDateTypeError() {

6
deps/v8/src/debug-debugger.js

@@ -26,14 +26,14 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 // Default number of frames to include in the response to backtrace request.
-const kDefaultBacktraceLength = 10;
+var kDefaultBacktraceLength = 10;
-const Debug = {};
+var Debug = {};
 // Regular expression to skip "crud" at the beginning of a source line which is
 // not really code. Currently the regular expression matches whitespace and
 // comments.
-const sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
+var sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
 // Debug events which can occour in the V8 JavaScript engine. These originate
 // from the API include file debug.h.

4
deps/v8/src/deoptimizer.h

@@ -267,11 +267,7 @@ class Deoptimizer : public Malloced {
   int ConvertJSFrameIndexToFrameIndex(int jsframe_index);
  private:
-#ifdef V8_TARGET_ARCH_MIPS
-  static const int kNumberOfEntries = 4096;
-#else
   static const int kNumberOfEntries = 16384;
-#endif
   Deoptimizer(Isolate* isolate,
               JSFunction* function,

94
deps/v8/src/elements.cc

@@ -109,28 +109,27 @@ class ElementsAccessorBase : public ElementsAccessor {
                            uint32_t key,
                            JSObject* obj,
                            Object* receiver) {
-    return ElementsAccessorSubclass::Get(
+    return ElementsAccessorSubclass::GetImpl(
         BackingStoreClass::cast(backing_store), key, obj, receiver);
   }
-  static MaybeObject* Get(BackingStoreClass* backing_store,
-                          uint32_t key,
-                          JSObject* obj,
-                          Object* receiver) {
-    if (key < ElementsAccessorSubclass::GetCapacity(backing_store)) {
-      return backing_store->get(key);
-    }
-    return backing_store->GetHeap()->the_hole_value();
+  static MaybeObject* GetImpl(BackingStoreClass* backing_store,
+                              uint32_t key,
+                              JSObject* obj,
+                              Object* receiver) {
+    return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store))
+           ? backing_store->get(key)
+           : backing_store->GetHeap()->the_hole_value();
   }
   virtual MaybeObject* SetLength(JSObject* obj,
                                  Object* length) {
     ASSERT(obj->IsJSArray());
-    return ElementsAccessorSubclass::SetLength(
+    return ElementsAccessorSubclass::SetLengthImpl(
         BackingStoreClass::cast(obj->elements()), obj, length);
   }
-  static MaybeObject* SetLength(BackingStoreClass* backing_store,
-                                JSObject* obj,
-                                Object* length);
+  static MaybeObject* SetLengthImpl(BackingStoreClass* backing_store,
+                                    JSObject* obj,
+                                    Object* length);
@@ -167,7 +166,7 @@ class ElementsAccessorBase : public ElementsAccessor {
     }
 #endif
     BackingStoreClass* backing_store = BackingStoreClass::cast(from);
-    uint32_t len1 = ElementsAccessorSubclass::GetCapacity(backing_store);
+    uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(backing_store);
     // Optimize if 'other' is empty.
     // We cannot optimize if 'this' is empty, as other may have holes.
@@ -176,14 +175,13 @@ class ElementsAccessorBase : public ElementsAccessor {
     // Compute how many elements are not in other.
     int extra = 0;
     for (uint32_t y = 0; y < len1; y++) {
-      if (ElementsAccessorSubclass::HasElementAtIndex(backing_store,
-                                                      y,
-                                                      holder,
-                                                      receiver)) {
+      if (ElementsAccessorSubclass::HasElementAtIndexImpl(
+              backing_store, y, holder, receiver)) {
         uint32_t key =
-            ElementsAccessorSubclass::GetKeyForIndex(backing_store, y);
+            ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
         MaybeObject* maybe_value =
-            ElementsAccessorSubclass::Get(backing_store, key, holder, receiver);
+            ElementsAccessorSubclass::GetImpl(backing_store, key,
+                                              holder, receiver);
         Object* value;
         if (!maybe_value->ToObject(&value)) return maybe_value;
         ASSERT(!value->IsTheHole());
@@ -214,14 +212,13 @@ class ElementsAccessorBase : public ElementsAccessor {
     // Fill in the extra values.
     int index = 0;
     for (uint32_t y = 0; y < len1; y++) {
-      if (ElementsAccessorSubclass::HasElementAtIndex(backing_store,
-                                                      y,
-                                                      holder,
-                                                      receiver)) {
+      if (ElementsAccessorSubclass::HasElementAtIndexImpl(
+              backing_store, y, holder, receiver)) {
         uint32_t key =
-            ElementsAccessorSubclass::GetKeyForIndex(backing_store, y);
+            ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
         MaybeObject* maybe_value =
-            ElementsAccessorSubclass::Get(backing_store, key, holder, receiver);
+            ElementsAccessorSubclass::GetImpl(backing_store, key,
+                                              holder, receiver);
         Object* value;
         if (!maybe_value->ToObject(&value)) return maybe_value;
         if (!value->IsTheHole() && !HasKey(to, value)) {
@@ -235,25 +232,23 @@ class ElementsAccessorBase : public ElementsAccessor {
   }
  protected:
-  static uint32_t GetCapacity(BackingStoreClass* backing_store) {
+  static uint32_t GetCapacityImpl(BackingStoreClass* backing_store) {
     return backing_store->length();
   }
   virtual uint32_t GetCapacity(FixedArrayBase* backing_store) {
-    return ElementsAccessorSubclass::GetCapacity(
+    return ElementsAccessorSubclass::GetCapacityImpl(
         BackingStoreClass::cast(backing_store));
   }
-  static bool HasElementAtIndex(BackingStoreClass* backing_store,
-                                uint32_t index,
-                                JSObject* holder,
-                                Object* receiver) {
+  static bool HasElementAtIndexImpl(BackingStoreClass* backing_store,
+                                    uint32_t index,
+                                    JSObject* holder,
+                                    Object* receiver) {
     uint32_t key =
-        ElementsAccessorSubclass::GetKeyForIndex(backing_store, index);
-    MaybeObject* element = ElementsAccessorSubclass::Get(backing_store,
-                                                         key,
-                                                         holder,
-                                                         receiver);
+        ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
+    MaybeObject* element =
+        ElementsAccessorSubclass::GetImpl(backing_store, key, holder, receiver);
     return !element->IsTheHole();
   }
@@ -261,18 +256,18 @@ class ElementsAccessorBase : public ElementsAccessor {
                                  uint32_t index,
                                  JSObject* holder,
                                  Object* receiver) {
-    return ElementsAccessorSubclass::HasElementAtIndex(
+    return ElementsAccessorSubclass::HasElementAtIndexImpl(
         BackingStoreClass::cast(backing_store), index, holder, receiver);
   }
-  static uint32_t GetKeyForIndex(BackingStoreClass* backing_store,
-                                 uint32_t index) {
+  static uint32_t GetKeyForIndexImpl(BackingStoreClass* backing_store,
+                                     uint32_t index) {
     return index;
   }
   virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
                                   uint32_t index) {
-    return ElementsAccessorSubclass::GetKeyForIndex(
+    return ElementsAccessorSubclass::GetKeyForIndexImpl(
         BackingStoreClass::cast(backing_store), index);
   }
@@ -446,7 +441,7 @@ class FastDoubleElementsAccessor
     return obj->GetHeap()->true_value();
   }
-  static bool HasElementAtIndex(FixedDoubleArray* backing_store,
-                                uint32_t index,
-                                JSObject* holder,
-                                Object* receiver) {
+  static bool HasElementAtIndexImpl(FixedDoubleArray* backing_store,
+                                    uint32_t index,
+                                    JSObject* holder,
+                                    Object* receiver) {
@@ -465,18 +460,17 @@ class ExternalElementsAccessor
   friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
                                     ExternalArray>;
-  static MaybeObject* Get(ExternalArray* backing_store,
-                          uint32_t key,
-                          JSObject* obj,
-                          Object* receiver) {
-    if (key < ExternalElementsAccessorSubclass::GetCapacity(backing_store)) {
-      return backing_store->get(key);
-    } else {
-      return backing_store->GetHeap()->undefined_value();
-    }
+  static MaybeObject* GetImpl(ExternalArray* backing_store,
+                              uint32_t key,
+                              JSObject* obj,
+                              Object* receiver) {
+    return
+        key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
+        ? backing_store->get(key)
+        : backing_store->GetHeap()->undefined_value();
   }
-  static MaybeObject* SetLength(ExternalArray* backing_store,
-                                JSObject* obj,
-                                Object* length) {
+  static MaybeObject* SetLengthImpl(ExternalArray* backing_store,
+                                    JSObject* obj,
+                                    Object* length) {
     // External arrays do not support changing their length.
@@ -663,7 +657,7 @@ class DictionaryElementsAccessor
     return DeleteCommon(obj, key, mode);
   }
-  static MaybeObject* Get(SeededNumberDictionary* backing_store,
-                          uint32_t key,
-                          JSObject* obj,
-                          Object* receiver) {
+  static MaybeObject* GetImpl(SeededNumberDictionary* backing_store,
+                              uint32_t key,
+                              JSObject* obj,
+                              Object* receiver) {
@@ -683,7 +677,7 @@ class DictionaryElementsAccessor
     return obj->GetHeap()->the_hole_value();
   }
-  static uint32_t GetKeyForIndex(SeededNumberDictionary* dict,
-                                 uint32_t index) {
+  static uint32_t GetKeyForIndexImpl(SeededNumberDictionary* dict,
+                                     uint32_t index) {
     Object* key = dict->KeyAt(index);
     return Smi::cast(key)->value();
@@ -698,7 +692,7 @@ class NonStrictArgumentsElementsAccessor
   friend class ElementsAccessorBase<NonStrictArgumentsElementsAccessor,
                                     FixedArray>;
-  static MaybeObject* Get(FixedArray* parameter_map,
-                          uint32_t key,
-                          JSObject* obj,
-                          Object* receiver) {
+  static MaybeObject* GetImpl(FixedArray* parameter_map,
+                              uint32_t key,
+                              JSObject* obj,
+                              Object* receiver) {
@@ -718,7 +712,7 @@ class NonStrictArgumentsElementsAccessor
     }
   }
-  static MaybeObject* SetLength(FixedArray* parameter_map,
-                                JSObject* obj,
-                                Object* length) {
+  static MaybeObject* SetLengthImpl(FixedArray* parameter_map,
+                                    JSObject* obj,
+                                    Object* length) {
     // TODO(mstarzinger): This was never implemented but will be used once we
@@ -748,18 +742,18 @@ class NonStrictArgumentsElementsAccessor
     return obj->GetHeap()->true_value();
   }
-  static uint32_t GetCapacity(FixedArray* parameter_map) {
+  static uint32_t GetCapacityImpl(FixedArray* parameter_map) {
     FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
     return Max(static_cast<uint32_t>(parameter_map->length() - 2),
                ForArray(arguments)->GetCapacity(arguments));
   }
-  static uint32_t GetKeyForIndex(FixedArray* dict,
-                                 uint32_t index) {
+  static uint32_t GetKeyForIndexImpl(FixedArray* dict,
+                                     uint32_t index) {
     return index;
   }
-  static bool HasElementAtIndex(FixedArray* parameter_map,
-                                uint32_t index,
-                                JSObject* holder,
-                                Object* receiver) {
+  static bool HasElementAtIndexImpl(FixedArray* parameter_map,
+                                    uint32_t index,
+                                    JSObject* holder,
+                                    Object* receiver) {
@@ -866,7 +860,7 @@ void ElementsAccessor::InitializeOncePerProcess() {
 template <typename ElementsAccessorSubclass, typename BackingStoreClass>
 MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass, BackingStoreClass>::
-    SetLength(BackingStoreClass* backing_store,
-              JSObject* obj,
-              Object* length) {
+    SetLengthImpl(BackingStoreClass* backing_store,
+                  JSObject* obj,
+                  Object* length) {
   JSArray* array = JSArray::cast(obj);
@@ -917,7 +911,9 @@ MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass, BackingStoreClass>::
     MaybeObject* maybe_obj = array->GetHeap()->AllocateFixedArray(1);
     if (!maybe_obj->To(&new_backing_store)) return maybe_obj;
     new_backing_store->set(0, length);
-    array->SetContent(new_backing_store);
+    { MaybeObject* result = array->SetContent(new_backing_store);
+      if (result->IsFailure()) return result;
+    }
     return array;
   }
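
The Get/SetLength to GetImpl/SetLengthImpl renames above all serve one CRTP pattern: ElementsAccessorBase implements the virtual interface once and forwards to statically dispatched *Impl hooks, and the new suffix keeps the virtual entry points from hiding the same-named static hooks in subclasses. A self-contained sketch of the dispatch (not V8 code):

    #include <cstdio>

    class ElementsAccessor {
     public:
      virtual ~ElementsAccessor() {}
      virtual int Get(int key) = 0;
    };

    // CRTP base: the virtual Get is implemented once and forwards to a
    // statically dispatched GetImpl that subclasses may shadow.
    template <typename Subclass>
    class ElementsAccessorBase : public ElementsAccessor {
     public:
      virtual int Get(int key) {
        // No vtable lookup for the inner call, and subclasses override
        // GetImpl without having to redeclare Get.
        return Subclass::GetImpl(key);
      }
     protected:
      static int GetImpl(int key) { return key; }  // default behaviour
    };

    class DoublingAccessor : public ElementsAccessorBase<DoublingAccessor> {
     public:
      static int GetImpl(int key) { return key * 2; }  // shadows the default
    };

    int main() {
      DoublingAccessor accessor;
      ElementsAccessor* base = &accessor;
      std::printf("%d\n", base->Get(21));  // prints 42 via static dispatch
      return 0;
    }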

7
deps/v8/src/execution.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -882,7 +882,9 @@ MaybeObject* Execution::HandleStackGuardInterrupt() {
   }
   isolate->counters()->stack_interrupts()->Increment();
-  if (stack_guard->IsRuntimeProfilerTick()) {
+  // If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
+  if (FLAG_count_based_interrupts ||
+      stack_guard->IsRuntimeProfilerTick()) {
     isolate->counters()->runtime_profiler_ticks()->Increment();
     stack_guard->Continue(RUNTIME_PROFILER_TICK);
     isolate->runtime_profiler()->OptimizeNow();
@@ -904,4 +906,5 @@ MaybeObject* Execution::HandleStackGuardInterrupt() {
   return isolate->heap()->undefined_value();
 }
-} }
+
+} }  // namespace v8::internal

15
deps/v8/src/factory.cc

@@ -148,6 +148,13 @@ Handle<AccessorPair> Factory::NewAccessorPair() {
 }
+
+Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() {
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->AllocateTypeFeedbackInfo(),
+                     TypeFeedbackInfo);
+}
+
 // Symbols are created in the old generation (data space).
 Handle<String> Factory::LookupSymbol(Vector<const char> string) {
   CALL_HEAP_FUNCTION(isolate(),
@@ -540,11 +547,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
           context->global_context());
     }
     result->set_literals(*literals);
-  } else {
-    result->set_function_bindings(isolate()->heap()->empty_fixed_array());
   }
-  result->set_next_function_link(isolate()->heap()->undefined_value());
-
   if (V8::UseCrankshaft() &&
       FLAG_always_opt &&
       result->is_compiled() &&
@@ -865,7 +868,7 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
   // Copy the descriptors from the array.
   for (int i = 0; i < array->number_of_descriptors(); i++) {
     if (!array->IsNullDescriptor(i)) {
-      result->CopyFrom(descriptor_count++, *array, i, witness);
+      DescriptorArray::CopyFrom(result, descriptor_count++, array, i, witness);
     }
   }
@@ -899,7 +902,7 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
     Handle<DescriptorArray> new_result =
         NewDescriptorArray(number_of_descriptors);
     for (int i = 0; i < number_of_descriptors; i++) {
-      new_result->CopyFrom(i, *result, i, witness);
+      DescriptorArray::CopyFrom(new_result, i, result, i, witness);
     }
     result = new_result;
   }
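
NewTypeFeedbackInfo follows the standard Factory shape: wrap a raw Heap allocation in CALL_HEAP_FUNCTION, which collects garbage and retries before giving up, then hands back a Handle. A simplified self-contained sketch of that retry contract (not the real macro):

    #include <cstdio>
    #include <cstdlib>

    struct Heap {
      int free_slots;
      void* Allocate() {
        if (free_slots == 0) return nullptr;  // failure sentinel
        --free_slots;
        return std::malloc(16);
      }
      void CollectAllGarbage() { free_slots = 8; }  // pretend GC freed memory
    };

    // Attempt the allocation; on failure, GC once and retry before aborting.
    template <typename AllocFn>
    void* CallHeapFunction(Heap* heap, AllocFn alloc) {
      void* result = alloc(heap);
      if (result != nullptr) return result;
      heap->CollectAllGarbage();
      result = alloc(heap);
      if (result == nullptr) std::abort();  // genuinely out of memory
      return result;
    }

    int main() {
      Heap heap = { 0 };  // start exhausted to exercise the retry path
      void* obj = CallHeapFunction(&heap,
                                   [](Heap* h) { return h->Allocate(); });
      std::printf("allocated %p after GC retry\n", obj);
      std::free(obj);
      return 0;
    }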

2
deps/v8/src/factory.h

@@ -76,6 +76,8 @@ class Factory {
   // Allocates a pre-tenured empty AccessorPair.
   Handle<AccessorPair> NewAccessorPair();
+  Handle<TypeFeedbackInfo> NewTypeFeedbackInfo();
+
   Handle<String> LookupSymbol(Vector<const char> str);
   Handle<String> LookupSymbol(Handle<String> str);
   Handle<String> LookupAsciiSymbol(Vector<const char> str);

27
deps/v8/src/flag-definitions.h

@@ -106,10 +106,13 @@ private:
 //
 #define FLAG FLAG_FULL
-// Flags for experimental language features.
+// Flags for language modes and experimental language features.
+DEFINE_bool(use_strict, false, "enforce strict mode")
 DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
 DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
-DEFINE_bool(harmony_modules, false, "enable harmony modules")
+DEFINE_bool(harmony_modules, false,
+            "enable harmony modules (implies block scoping)")
 DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
 DEFINE_bool(harmony_collections, false,
             "enable harmony collections (sets, maps, and weak maps)")
@@ -118,9 +121,10 @@ DEFINE_implication(harmony, harmony_scoping)
 DEFINE_implication(harmony, harmony_modules)
 DEFINE_implication(harmony, harmony_proxies)
 DEFINE_implication(harmony, harmony_collections)
+DEFINE_implication(harmony_modules, harmony_scoping)
 // Flags for experimental implementation features.
-DEFINE_bool(smi_only_arrays, false, "tracks arrays with only smi values")
+DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
 DEFINE_bool(clever_optimizations,
             true,
             "Optimize object size, Array shift, DOM strings and string +")
@@ -168,11 +172,28 @@ DEFINE_int(loop_weight, 1, "loop weight for representation inference")
 // Experimental profiler changes.
 DEFINE_bool(experimental_profiler, false, "enable all profiler experiments")
 DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")
+DEFINE_int(frame_count, 2, "number of stack frames inspected by the profiler")
 DEFINE_bool(self_optimization, false,
             "primitive functions trigger their own optimization")
+DEFINE_bool(count_based_interrupts, false,
+            "trigger profiler ticks based on counting instead of timing")
+DEFINE_bool(interrupt_at_exit, false,
+            "insert an interrupt check at function exit")
+DEFINE_bool(weighted_back_edges, false,
+            "weight back edges by jump distance for interrupt triggering")
+DEFINE_int(interrupt_budget, 10000,
+           "execution budget before interrupt is triggered")
+DEFINE_int(type_info_threshold, 0,
+           "percentage of ICs that must have type info to allow optimization")
 DEFINE_implication(experimental_profiler, watch_ic_patching)
 DEFINE_implication(experimental_profiler, self_optimization)
+DEFINE_implication(experimental_profiler, count_based_interrupts)
+DEFINE_implication(experimental_profiler, interrupt_at_exit)
+DEFINE_implication(experimental_profiler, weighted_back_edges)
+
+DEFINE_bool(trace_opt_verbose, false, "extra verbose compilation tracing")
+DEFINE_implication(trace_opt_verbose, trace_opt)
 // assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
 DEFINE_bool(debug_code, false,
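
DEFINE_implication makes one flag switch on others after parsing, which is how --experimental_profiler alone enables the whole set of new experiments above. A self-contained sketch of the mechanism (not the real flag machinery):

    #include <cstdio>

    // Pretend these were parsed from argv.
    static bool FLAG_experimental_profiler = true;
    static bool FLAG_count_based_interrupts = false;
    static bool FLAG_interrupt_at_exit = false;
    static bool FLAG_weighted_back_edges = false;

    // After parsing, each implication propagates a parent flag into its
    // children, mirroring DEFINE_implication above.
    #define DEFINE_IMPLICATION(whenflag, thenflag) \
      if (FLAG_##whenflag) FLAG_##thenflag = true;

    static void EnforceFlagImplications() {
      DEFINE_IMPLICATION(experimental_profiler, count_based_interrupts)
      DEFINE_IMPLICATION(experimental_profiler, interrupt_at_exit)
      DEFINE_IMPLICATION(experimental_profiler, weighted_back_edges)
    }

    int main() {
      EnforceFlagImplications();
      std::printf("count_based_interrupts=%d interrupt_at_exit=%d\n",
                  FLAG_count_based_interrupts, FLAG_interrupt_at_exit);
      return 0;
    }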

22
deps/v8/src/full-codegen.cc

@@ -291,8 +291,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   masm.positions_recorder()->StartGDBJITLineInfoRecording();
 #endif
-  FullCodeGenerator cgen(&masm);
-  cgen.Generate(info);
+  FullCodeGenerator cgen(&masm, info);
+  cgen.Generate();
   if (cgen.HasStackOverflow()) {
     ASSERT(!isolate->has_pending_exception());
     return false;
@@ -303,6 +303,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
   code->set_optimizable(info->IsOptimizable());
   cgen.PopulateDeoptimizationData(code);
+  cgen.PopulateTypeFeedbackInfo(code);
   cgen.PopulateTypeFeedbackCells(code);
   code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
   code->set_handler_table(*cgen.handler_table());
@@ -361,6 +362,13 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
 }
+
+void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
+  Handle<TypeFeedbackInfo> info = isolate()->factory()->NewTypeFeedbackInfo();
+  info->set_ic_total_count(ic_total_count_);
+  code->set_type_feedback_info(*info);
+}
+
 void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
   if (type_feedback_cells_.is_empty()) return;
   int length = type_feedback_cells_.length();
@@ -371,7 +379,8 @@ void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
     cache->SetAstId(i, Smi::FromInt(type_feedback_cells_[i].ast_id));
     cache->SetCell(i, *type_feedback_cells_[i].cell);
   }
-  code->set_type_feedback_cells(*cache);
+  TypeFeedbackInfo::cast(code->type_feedback_info())->set_type_feedback_cells(
+      *cache);
 }
@@ -404,6 +413,7 @@ void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
   if (!info_->HasDeoptimizationSupport()) return;
   unsigned pc_and_state =
       StateField::encode(state) | PcField::encode(masm_->pc_offset());
+  ASSERT(Smi::IsValid(pc_and_state));
   BailoutEntry entry = { id, pc_and_state };
 #ifdef DEBUG
   if (FLAG_enable_slow_asserts) {
@@ -1073,7 +1083,7 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   // Check stack before looping.
   PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
   __ bind(&stack_check);
-  EmitStackCheck(stmt);
+  EmitStackCheck(stmt, &body);
   __ jmp(&body);
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1102,7 +1112,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
   SetStatementPosition(stmt);
   // Check stack before looping.
-  EmitStackCheck(stmt);
+  EmitStackCheck(stmt, &body);
   __ bind(&test);
   VisitForControl(stmt->cond(),
@@ -1145,7 +1155,7 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
   SetStatementPosition(stmt);
   // Check stack before looping.
-  EmitStackCheck(stmt);
+  EmitStackCheck(stmt, &body);
   __ bind(&test);
   if (stmt->cond() != NULL) {
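
The extra &body argument gives EmitStackCheck the back-edge jump target, which under --weighted_back_edges lets the interrupt budget be charged in proportion to the loop body's code size. A sketch of that weighting (the scale factor and divisor here are illustrative, not V8's actual values):

    #include <cstdio>

    // Bigger loop bodies burn more budget per back edge, so hot large
    // loops reach the profiler tick sooner than tight small ones.
    static int Weight(int current_pc, int back_edge_target_pc) {
      int distance = current_pc - back_edge_target_pc;  // bytes of loop body
      const int kUnitsPerByte = 1;            // illustrative scale factor
      int weight = distance * kUnitsPerByte / 128;  // illustrative divisor
      return weight < 1 ? 1 : weight;
    }

    int main() {
      std::printf("tight loop weight: %d\n", Weight(1040, 1000));  // 1
      std::printf("large loop weight: %d\n", Weight(9000, 1000));  // 62
      return 0;
    }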

46
deps/v8/src/full-codegen.h

@@ -77,29 +77,27 @@ class FullCodeGenerator: public AstVisitor {
     TOS_REG
   };
-  explicit FullCodeGenerator(MacroAssembler* masm)
+  FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
       : masm_(masm),
-        info_(NULL),
-        scope_(NULL),
+        info_(info),
+        scope_(info->scope()),
         nesting_stack_(NULL),
         loop_depth_(0),
         global_count_(0),
         context_(NULL),
-        bailout_entries_(0),
+        bailout_entries_(info->HasDeoptimizationSupport()
+                         ? info->function()->ast_node_count() : 0),
         stack_checks_(2),  // There's always at least one.
-        type_feedback_cells_(0) {
-  }
+        type_feedback_cells_(info->HasDeoptimizationSupport()
+                             ? info->function()->ast_node_count() : 0),
+        ic_total_count_(0) { }
   static bool MakeCode(CompilationInfo* info);
-  void Generate(CompilationInfo* info);
-  void PopulateDeoptimizationData(Handle<Code> code);
-  void PopulateTypeFeedbackCells(Handle<Code> code);
-
-  Handle<FixedArray> handler_table() { return handler_table_; }
-
-  class StateField : public BitField<State, 0, 8> { };
-  class PcField : public BitField<unsigned, 8, 32-8> { };
+  // Encode state and pc-offset as a BitField<type, start, size>.
+  // Only use 30 bits because we encode the result as a smi.
+  class StateField : public BitField<State, 0, 1> { };
+  class PcField : public BitField<unsigned, 1, 30-1> { };
   static const char* State2String(State state) {
     switch (state) {
@@ -424,7 +422,10 @@ class FullCodeGenerator: public AstVisitor {
   // Platform-specific code for checking the stack limit at the back edge of
   // a loop.
-  void EmitStackCheck(IterationStatement* stmt);
+  // This is meant to be called at loop back edges, |back_edge_target| is
+  // the jump target of the back edge and is used to approximate the amount
+  // of code inside the loop.
+  void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target);
   // Record the OSR AST id corresponding to a stack check in the code.
   void RecordStackCheck(unsigned osr_ast_id);
   // Emit a table of stack check ids and pcs into the code stream. Return
@@ -494,7 +495,7 @@ class FullCodeGenerator: public AstVisitor {
   // Assign to the given expression as if via '='. The right-hand-side value
   // is expected in the accumulator.
-  void EmitAssignment(Expression* expr, int bailout_ast_id);
+  void EmitAssignment(Expression* expr);
   // Complete a variable assignment. The right-hand-side value is expected
   // in the accumulator.
@@ -510,6 +511,10 @@ class FullCodeGenerator: public AstVisitor {
   // accumulator.
   void EmitKeyedPropertyAssignment(Assignment* expr);
+  void CallIC(Handle<Code> code,
+              RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+              unsigned ast_id = kNoASTId);
+
   void SetFunctionPosition(FunctionLiteral* fun);
   void SetReturnPosition(FunctionLiteral* fun);
   void SetStatementPosition(Statement* stmt);
@@ -575,6 +580,13 @@ class FullCodeGenerator: public AstVisitor {
   void VisitForTypeofValue(Expression* expr);
+  void Generate();
+  void PopulateDeoptimizationData(Handle<Code> code);
+  void PopulateTypeFeedbackInfo(Handle<Code> code);
+  void PopulateTypeFeedbackCells(Handle<Code> code);
+
+  Handle<FixedArray> handler_table() { return handler_table_; }
+
   struct BailoutEntry {
     unsigned id;
     unsigned pc_and_state;
@@ -773,7 +785,9 @@ class FullCodeGenerator: public AstVisitor {
   ZoneList<BailoutEntry> bailout_entries_;
   ZoneList<BailoutEntry> stack_checks_;
   ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
+  int ic_total_count_;
   Handle<FixedArray> handler_table_;
+  Handle<JSGlobalPropertyCell> profiling_counter_;
   friend class NestedStatement;
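
The StateField/PcField shrink from 8+24 to 1+29 bits is driven by the new ASSERT(Smi::IsValid(pc_and_state)) in full-codegen.cc: the packed value is stored as a smi, whose payload is only 31 bits on 32-bit targets. A self-contained sketch of the constraint:

    #include <cassert>
    #include <cstdint>

    enum State { NO_REGISTERS = 0, TOS_REG = 1 };

    static const int kSmiValueBits = 31;  // 32-bit V8: one tag bit remains

    static bool SmiIsValid(uint32_t value) {
      // Non-negative values must fit in the signed 31-bit smi payload.
      return value < (1u << (kSmiValueBits - 1));
    }

    // State occupies bit 0; pc-offset occupies bits 1..29 (30 bits total).
    static uint32_t Encode(State state, uint32_t pc_offset) {
      assert(pc_offset < (1u << 29));
      uint32_t packed = static_cast<uint32_t>(state) | (pc_offset << 1);
      assert(SmiIsValid(packed));  // mirrors ASSERT(Smi::IsValid(...))
      return packed;
    }

    int main() {
      uint32_t packed = Encode(TOS_REG, 123456);
      assert((packed & 1) == TOS_REG);
      assert((packed >> 1) == 123456);
      return 0;
    }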

7
deps/v8/src/globals.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -191,6 +191,11 @@ typedef byte* Address;
 #define V8_PTR_PREFIX ""
 #endif  // V8_HOST_ARCH_64_BIT
+#ifdef __MINGW64__
+#undef V8_PTR_PREFIX
+#define V8_PTR_PREFIX "I64"
+#endif  // __MINGW64__
+
 // The following macro works on both 32 and 64-bit platforms.
 // Usage: instead of writing 0x1234567890123456
 //      write V8_2PART_UINT64_C(0x12345678,90123456);
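
V8_PTR_PREFIX is the printf length prefix for pointer-sized integers; MinGW-w64 targets LLP64, where long is only 32 bits, so the MS-style "I64" prefix is needed there. A self-contained sketch of the usage (the non-MinGW branch below is simplified; the real header selects per platform):

    #include <cstdio>
    #include <cstdint>

    #ifdef __MINGW64__
    #define V8_PTR_PREFIX "I64"
    #else
    #define V8_PTR_PREFIX "l"  // simplified stand-in for the real selection
    #endif

    int main() {
      intptr_t p = reinterpret_cast<intptr_t>(&p);
      // String literals concatenate into e.g. "%lx" or "%I64x".
      std::printf("stack address: 0x%" V8_PTR_PREFIX "x\n", p);
      return 0;
    }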

39
deps/v8/src/handles.cc

@@ -711,26 +711,57 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
                    isolate);
     }
     isolate->counters()->enum_cache_misses()->Increment();
+    Handle<Map> map(object->map());
     int num_enum = object->NumberOfLocalProperties(DONT_ENUM);
+
     Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
     Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
+
+    Handle<FixedArray> indices;
+    Handle<FixedArray> sort_array2;
+
+    if (cache_result) {
+      indices = isolate->factory()->NewFixedArray(num_enum);
+      sort_array2 = isolate->factory()->NewFixedArray(num_enum);
+    }
+
     Handle<DescriptorArray> descs =
         Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
+
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
       if (descs->IsProperty(i) && !descs->IsDontEnum(i)) {
-        (*storage)->set(index, descs->GetKey(i));
+        storage->set(index, descs->GetKey(i));
         PropertyDetails details(descs->GetDetails(i));
-        (*sort_array)->set(index, Smi::FromInt(details.index()));
+        sort_array->set(index, Smi::FromInt(details.index()));
+        if (!indices.is_null()) {
+          if (details.type() != FIELD) {
+            indices = Handle<FixedArray>();
+            sort_array2 = Handle<FixedArray>();
+          } else {
+            int field_index = Descriptor::IndexFromValue(descs->GetValue(i));
+            if (field_index >= map->inobject_properties()) {
+              field_index = -(field_index - map->inobject_properties() + 1);
+            }
+            indices->set(index, Smi::FromInt(field_index));
+            sort_array2->set(index, Smi::FromInt(details.index()));
+          }
+        }
         index++;
       }
     }
-    (*storage)->SortPairs(*sort_array, sort_array->length());
+    storage->SortPairs(*sort_array, sort_array->length());
+    if (!indices.is_null()) {
+      indices->SortPairs(*sort_array2, sort_array2->length());
+    }
     if (cache_result) {
       Handle<FixedArray> bridge_storage =
           isolate->factory()->NewFixedArray(
               DescriptorArray::kEnumCacheBridgeLength);
       DescriptorArray* desc = object->map()->instance_descriptors();
-      desc->SetEnumCache(*bridge_storage, *storage);
+      desc->SetEnumCache(*bridge_storage,
+                         *storage,
+                         indices.is_null() ? Object::cast(Smi::FromInt(0))
+                                           : Object::cast(*indices));
     }
     ASSERT(storage->length() == index);
     return storage;
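The new indices cache stores each enumerable property's field index as a Smi, using sign to tell in-object fields from fields in the out-of-object properties backing store: an in-object index is stored as-is, while out-of-object index i becomes -(i - inobject + 1). A self-contained sketch of that encoding and its inverse (the helper names here are ours, not V8's):

    #include <cassert>

    // Hypothetical mirror of the encoding above: field indices at or past the
    // in-object count live in the properties array and are stored negated.
    int EncodeFieldIndex(int field_index, int inobject_properties) {
      if (field_index >= inobject_properties) {
        return -(field_index - inobject_properties + 1);  // negative, 1-based
      }
      return field_index;  // in-object: stored unchanged
    }

    int DecodeFieldIndex(int encoded, int inobject_properties) {
      return encoded >= 0 ? encoded : inobject_properties - encoded - 1;
    }

    int main() {
      const int inobject = 4;
      for (int i = 0; i < 10; ++i) {
        assert(DecodeFieldIndex(EncodeFieldIndex(i, inobject), inobject) == i);
      }
      return 0;
    }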

61
deps/v8/src/heap.cc

@@ -1938,6 +1938,19 @@ MaybeObject* Heap::AllocateAccessorPair() {
 }


+MaybeObject* Heap::AllocateTypeFeedbackInfo() {
+  TypeFeedbackInfo* info;
+  { MaybeObject* maybe_result = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
+    if (!maybe_result->To(&info)) return maybe_result;
+  }
+  info->set_ic_total_count(0);
+  info->set_ic_with_typeinfo_count(0);
+  info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
+                                SKIP_WRITE_BARRIER);
+  return info;
+}
+
+
 const Heap::StringTypeTable Heap::string_type_table[] = {
 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                 \
   {type, size, k##camel_name##MapRootIndex},
@@ -2217,6 +2230,12 @@ bool Heap::CreateInitialMaps() {
   }
   set_block_context_map(Map::cast(obj));

+  { MaybeObject* maybe_obj =
+        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_module_context_map(Map::cast(obj));
+
   { MaybeObject* maybe_obj =
         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -3361,8 +3380,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
     code->set_check_type(RECEIVER_MAP_CHECK);
   }
   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
-  code->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
-                                SKIP_WRITE_BARRIER);
+  code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
   code->set_gc_metadata(Smi::FromInt(0));
   // Allow self references to created code object by patching the handle to
@@ -4361,10 +4379,10 @@ MaybeObject* Heap::AllocateJSArray(
   Context* global_context = isolate()->context()->global_context();
   JSFunction* array_function = global_context->array_function();
   Map* map = array_function->initial_map();
-  if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) {
-    map = Map::cast(global_context->object_js_array_map());
-  } else if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
     map = Map::cast(global_context->double_js_array_map());
+  } else if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) {
+    map = Map::cast(global_context->object_js_array_map());
   } else {
     ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
     ASSERT(map == global_context->smi_js_array_map());
@@ -4562,7 +4580,7 @@ MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
     int length,
     PretenureFlag pretenure) {
-  if (length == 0) return empty_fixed_double_array();
+  if (length == 0) return empty_fixed_array();

   Object* elements_object;
   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
@@ -4579,7 +4597,7 @@ MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
     int length,
     PretenureFlag pretenure) {
-  if (length == 0) return empty_fixed_double_array();
+  if (length == 0) return empty_fixed_array();

   Object* elements_object;
   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
@@ -5062,8 +5080,37 @@ void Heap::Verify() {
   cell_space_->Verify(&no_dirty_regions_visitor);
   lo_space_->Verify();
+
+  VerifyNoAccessorPairSharing();
 }


+void Heap::VerifyNoAccessorPairSharing() {
+  // Verification is done in 2 phases: First we mark all AccessorPairs, checking
+  // that we mark only unmarked pairs, then we clear all marks, restoring the
+  // initial state. We use the Smi tag of the AccessorPair's getter as the
+  // marking bit, because we can never see a Smi as the getter.
+  for (int phase = 0; phase < 2; phase++) {
+    HeapObjectIterator iter(map_space());
+    for (HeapObject* obj = iter.Next(); obj != NULL; obj = iter.Next()) {
+      if (obj->IsMap()) {
+        DescriptorArray* descs = Map::cast(obj)->instance_descriptors();
+        for (int i = 0; i < descs->number_of_descriptors(); i++) {
+          if (descs->GetType(i) == CALLBACKS &&
+              descs->GetValue(i)->IsAccessorPair()) {
+            AccessorPair* accessors = AccessorPair::cast(descs->GetValue(i));
+            uintptr_t before = reinterpret_cast<intptr_t>(accessors->getter());
+            uintptr_t after = (phase == 0) ?
+                ((before & ~kSmiTagMask) | kSmiTag) :
+                ((before & ~kHeapObjectTag) | kHeapObjectTag);
+            CHECK(before != after);
+            accessors->set_getter(reinterpret_cast<Object*>(after));
+          }
+        }
+      }
+    }
+  }
+}
+
+
 #endif  // DEBUG
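The verification pass above borrows the low tag bits of the getter slot as a mark bit: a pointer retagged as a Smi can never be a legitimate getter, so flipping the tag marks the pair in phase 0, and phase 1 restores the HeapObject tag. A toy version of the same trick over plain integers (the tag constants match V8's pointer tagging; everything else is a sketch):

    #include <cassert>
    #include <cstdint>

    // V8 pointer tagging: low bit 0 => Smi, low bit 1 => HeapObject.
    const uintptr_t kSmiTagMask = 1;
    const uintptr_t kSmiTag = 0;
    const uintptr_t kHeapObjectTag = 1;

    int main() {
      uintptr_t getter = 0x1234560 | kHeapObjectTag;  // pretend heap pointer
      // Phase 0: mark by retagging as Smi -- impossible for a real getter.
      uintptr_t marked = (getter & ~kSmiTagMask) | kSmiTag;
      assert(marked != getter);             // the CHECK(before != after) above
      assert((marked & kSmiTagMask) == 0);  // now "looks like" a Smi
      // Phase 1: restore the HeapObject tag, returning to the initial state.
      uintptr_t restored = (marked & ~kHeapObjectTag) | kHeapObjectTag;
      assert(restored == getter);
      return 0;
    }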

15
deps/v8/src/heap.h

@@ -74,7 +74,6 @@ namespace internal {
   V(Map, hash_table_map, HashTableMap)                                   \
   V(FixedArray, empty_fixed_array, EmptyFixedArray)                      \
   V(ByteArray, empty_byte_array, EmptyByteArray)                         \
-  V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray)   \
   V(String, empty_string, EmptyString)                                   \
   V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)       \
   V(Smi, stack_limit, StackLimit)                                        \
@@ -131,6 +130,7 @@ namespace internal {
   V(Map, catch_context_map, CatchContextMap)                             \
   V(Map, with_context_map, WithContextMap)                               \
   V(Map, block_context_map, BlockContextMap)                             \
+  V(Map, module_context_map, ModuleContextMap)                           \
   V(Map, oddball_map, OddballMap)                                        \
   V(Map, message_object_map, JSMessageObjectMap)                         \
   V(Map, foreign_map, ForeignMap)                                        \
@@ -205,12 +205,10 @@ namespace internal {
   V(InitializeConstGlobal_symbol, "InitializeConstGlobal")               \
   V(KeyedLoadElementMonomorphic_symbol,                                  \
     "KeyedLoadElementMonomorphic")                                       \
-  V(KeyedLoadElementPolymorphic_symbol,                                  \
-    "KeyedLoadElementPolymorphic")                                       \
   V(KeyedStoreElementMonomorphic_symbol,                                 \
     "KeyedStoreElementMonomorphic")                                      \
-  V(KeyedStoreElementPolymorphic_symbol,                                 \
-    "KeyedStoreElementPolymorphic")                                      \
+  V(KeyedStoreAndGrowElementMonomorphic_symbol,                          \
+    "KeyedStoreAndGrowElementMonomorphic")                               \
   V(stack_overflow_symbol, "kStackOverflowBoilerplate")                  \
   V(illegal_access_symbol, "illegal access")                             \
   V(out_of_memory_symbol, "out-of-memory")                               \
@@ -643,6 +641,9 @@ class Heap {
   // Allocates a pre-tenured empty AccessorPair.
   MUST_USE_RESULT MaybeObject* AllocateAccessorPair();

+  // Allocates an empty TypeFeedbackInfo.
+  MUST_USE_RESULT MaybeObject* AllocateTypeFeedbackInfo();
+
   // Clear the Instanceof cache (used when a prototype changes).
   inline void ClearInstanceofCache();

@@ -1222,6 +1223,10 @@ class Heap {
   // Verify the heap is in its normal state before or after a GC.
   void Verify();

+  // Verify that AccessorPairs are not shared, i.e. make sure that they have
+  // exactly one pointer to them.
+  void VerifyNoAccessorPairSharing();
+
   void OldPointerSpaceCheckStoreBuffer();
   void MapSpaceCheckStoreBuffer();
   void LargeObjectSpaceCheckStoreBuffer();

76
deps/v8/src/hydrogen-instructions.cc

@@ -786,6 +786,33 @@ void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
 }


+void HCheckMapValue::PrintDataTo(StringStream* stream) {
+  value()->PrintNameTo(stream);
+  stream->Add(" ");
+  map()->PrintNameTo(stream);
+}
+
+
+void HForInPrepareMap::PrintDataTo(StringStream* stream) {
+  enumerable()->PrintNameTo(stream);
+}
+
+
+void HForInCacheArray::PrintDataTo(StringStream* stream) {
+  enumerable()->PrintNameTo(stream);
+  stream->Add(" ");
+  map()->PrintNameTo(stream);
+  stream->Add("[%d]", idx_);
+}
+
+
+void HLoadFieldByIndex::PrintDataTo(StringStream* stream) {
+  object()->PrintNameTo(stream);
+  stream->Add(" ");
+  index()->PrintNameTo(stream);
+}
+
+
 HValue* HConstant::Canonicalize() {
   return HasNoUses() && !IsBlockEntry() ? NULL : this;
 }
@@ -1519,10 +1546,15 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {


 bool HLoadKeyedFastElement::RequiresHoleCheck() {
+  if (hole_check_mode_ == OMIT_HOLE_CHECK) {
+    return false;
+  }
+
   for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
     HValue* use = it.value();
     if (!use->IsChange()) return true;
   }
+
   return false;
 }

@@ -1543,6 +1575,39 @@ void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
 }


+HValue* HLoadKeyedGeneric::Canonicalize() {
+  // Recognize generic keyed loads that use property name generated
+  // by for-in statement as a key and rewrite them into fast property load
+  // by index.
+  if (key()->IsLoadKeyedFastElement()) {
+    HLoadKeyedFastElement* key_load = HLoadKeyedFastElement::cast(key());
+    if (key_load->object()->IsForInCacheArray()) {
+      HForInCacheArray* names_cache =
+          HForInCacheArray::cast(key_load->object());
+
+      if (names_cache->enumerable() == object()) {
+        HForInCacheArray* index_cache =
+            names_cache->index_cache();
+        HCheckMapValue* map_check =
+            new(block()->zone()) HCheckMapValue(object(), names_cache->map());
+        HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
+            index_cache,
+            key_load->key(),
+            HLoadKeyedFastElement::OMIT_HOLE_CHECK);
+        HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
+            object(), index);
+        map_check->InsertBefore(this);
+        index->InsertBefore(this);
+        load->InsertBefore(this);
+        return load;
+      }
+    }
+  }
+  return this;
+}
+
+
 void HLoadKeyedSpecializedArrayElement::PrintDataTo(
     StringStream* stream) {
   external_pointer()->PrintNameTo(stream);
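This canonicalization targets loops of the form `for (var k in o) { use(o[k]); }`: when the key comes straight out of the for-in enum cache, the generic name lookup for `o[k]` can be replaced by fetching the precomputed field index from the parallel index cache and loading the field directly, guarded by a map check against the map captured at loop entry. A toy model of the rewritten data flow (plain C++ standing in for the hydrogen instructions; names are ours):

    #include <cassert>
    #include <string>
    #include <vector>

    // Generic path: fields[map.Lookup(enum_cache[i])] (HLoadKeyedGeneric).
    // Canonicalized path: fields[index_cache[i]] (HLoadFieldByIndex), with
    // map validity guaranteed separately by HCheckMapValue.
    int main() {
      std::vector<std::string> enum_cache = {"x", "y"};  // names in enum order
      std::vector<int> index_cache = {0, 1};             // field index per name
      std::vector<int> fields = {10, 20};                // o's in-object fields

      for (size_t i = 0; i < enum_cache.size(); ++i) {
        int value = fields[index_cache[i]];  // no name lookup at all
        assert(value == (i == 0 ? 10 : 20));
      }
      return 0;
    }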
@@ -1841,17 +1906,18 @@ HType HStringCharFromCode::CalculateInferredType() {
 }


-HType HArrayLiteral::CalculateInferredType() {
-  return HType::JSArray();
+HType HFastLiteral::CalculateInferredType() {
+  // TODO(mstarzinger): Be smarter, could also be JSArray here.
+  return HType::JSObject();
 }


-HType HObjectLiteralFast::CalculateInferredType() {
-  return HType::JSObject();
+HType HArrayLiteral::CalculateInferredType() {
+  return HType::JSArray();
 }


-HType HObjectLiteralGeneric::CalculateInferredType() {
+HType HObjectLiteral::CalculateInferredType() {
   return HType::JSObject();
 }

274
deps/v8/src/hydrogen-instructions.h

@@ -97,11 +97,13 @@ class LChunkBuilder;
   V(CompareConstantEqAndBranch)                \
   V(Constant)                                  \
   V(Context)                                   \
+  V(DeclareGlobals)                            \
   V(DeleteProperty)                            \
   V(Deoptimize)                                \
   V(Div)                                       \
   V(ElementsKind)                              \
   V(EnterInlined)                              \
+  V(FastLiteral)                               \
   V(FixedArrayBaseLength)                      \
   V(ForceRepresentation)                       \
   V(FunctionLiteral)                           \
@@ -139,8 +141,7 @@ class LChunkBuilder;
   V(LoadNamedGeneric)                          \
   V(Mod)                                       \
   V(Mul)                                       \
-  V(ObjectLiteralFast)                         \
-  V(ObjectLiteralGeneric)                      \
+  V(ObjectLiteral)                             \
   V(OsrEntry)                                  \
   V(OuterContext)                              \
   V(Parameter)                                 \
@@ -179,7 +180,11 @@ class LChunkBuilder;
   V(UnaryMathOperation)                        \
   V(UnknownOSRValue)                           \
   V(UseConst)                                  \
-  V(ValueOf)
+  V(ValueOf)                                   \
+  V(ForInPrepareMap)                           \
+  V(ForInCacheArray)                           \
+  V(CheckMapValue)                             \
+  V(LoadFieldByIndex)

 #define GVN_FLAG_LIST(V) \
   V(Calls)               \
@@ -1486,6 +1491,33 @@ class HOuterContext: public HUnaryOperation {
 };


+class HDeclareGlobals: public HUnaryOperation {
+ public:
+  HDeclareGlobals(HValue* context,
+                  Handle<FixedArray> pairs,
+                  int flags)
+      : HUnaryOperation(context),
+        pairs_(pairs),
+        flags_(flags) {
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  HValue* context() { return OperandAt(0); }
+  Handle<FixedArray> pairs() const { return pairs_; }
+  int flags() const { return flags_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals)
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+ private:
+  Handle<FixedArray> pairs_;
+  int flags_;
+};
+
+
 class HGlobalObject: public HUnaryOperation {
  public:
   explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
@@ -1983,7 +2015,8 @@ class HLoadExternalArrayPointer: public HUnaryOperation {

 class HCheckMap: public HTemplateInstruction<2> {
  public:
-  HCheckMap(HValue* value, Handle<Map> map,
+  HCheckMap(HValue* value,
+            Handle<Map> map,
             HValue* typecheck = NULL,
             CompareMapMode mode = REQUIRE_EXACT_MAP)
       : map_(map),
@@ -3786,7 +3819,12 @@ class HLoadFunctionPrototype: public HUnaryOperation {

 class HLoadKeyedFastElement: public HTemplateInstruction<2> {
  public:
-  HLoadKeyedFastElement(HValue* obj, HValue* key) {
+  enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
+
+  HLoadKeyedFastElement(HValue* obj,
+                        HValue* key,
+                        HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
+      : hole_check_mode_(hole_check_mode) {
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     set_representation(Representation::Tagged());
@@ -3811,7 +3849,14 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> {
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement)

  protected:
-  virtual bool DataEquals(HValue* other) { return true; }
+  virtual bool DataEquals(HValue* other) {
+    if (!other->IsLoadKeyedFastElement()) return false;
+    HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other);
+    return hole_check_mode_ == other_load->hole_check_mode_;
+  }
+
+ private:
+  HoleCheckMode hole_check_mode_;
 };

@@ -3915,6 +3960,8 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> {
     return Representation::Tagged();
   }

+  virtual HValue* Canonicalize();
+
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
 };

@@ -4163,17 +4210,8 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
         transitioned_map_(transitioned_map) {
     SetOperandAt(0, object);
     SetFlag(kUseGVN);
+    SetGVNFlag(kDependsOnMaps);
     SetGVNFlag(kChangesElementsKind);
-    if (original_map->has_fast_double_elements()) {
-      SetGVNFlag(kChangesElementsPointer);
-      SetGVNFlag(kDependsOnElementsPointer);
-      SetGVNFlag(kDependsOnDoubleArrayElements);
-    } else if (transitioned_map->has_fast_double_elements()) {
-      SetGVNFlag(kChangesElementsPointer);
-      SetGVNFlag(kDependsOnElementsPointer);
-      SetGVNFlag(kDependsOnArrayElements);
-    }
+    SetGVNFlag(kChangesElementsPointer);
     set_representation(Representation::Tagged());
   }

@@ -4329,82 +4367,82 @@ class HMaterializedLiteral: public HTemplateInstruction<V> {
 };


-class HArrayLiteral: public HMaterializedLiteral<1> {
- public:
-  HArrayLiteral(HValue* context,
-                Handle<HeapObject> boilerplate_object,
-                int length,
-                int literal_index,
-                int depth)
-      : HMaterializedLiteral<1>(literal_index, depth),
-        length_(length),
-        boilerplate_object_(boilerplate_object) {
-    SetOperandAt(0, context);
-  }
-
-  HValue* context() { return OperandAt(0); }
-  ElementsKind boilerplate_elements_kind() const {
-    if (!boilerplate_object_->IsJSObject()) {
-      return FAST_ELEMENTS;
-    }
-    return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
-  }
-  Handle<HeapObject> boilerplate_object() const { return boilerplate_object_; }
-  int length() const { return length_; }
-
-  bool IsCopyOnWrite() const;
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-  virtual HType CalculateInferredType();
-
-  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)
-
- private:
-  int length_;
-  Handle<HeapObject> boilerplate_object_;
-};
-
-
-class HObjectLiteralFast: public HMaterializedLiteral<1> {
- public:
-  HObjectLiteralFast(HValue* context,
-                     Handle<JSObject> boilerplate,
-                     int total_size,
-                     int literal_index,
-                     int depth)
-      : HMaterializedLiteral<1>(literal_index, depth),
-        boilerplate_(boilerplate),
-        total_size_(total_size) {
-    SetOperandAt(0, context);
-  }
-
-  // Maximum depth and total number of properties for object literal
-  // graphs to be considered for fast deep-copying.
-  static const int kMaxObjectLiteralDepth = 3;
-  static const int kMaxObjectLiteralProperties = 8;
-
-  HValue* context() { return OperandAt(0); }
-  Handle<JSObject> boilerplate() const { return boilerplate_; }
-  int total_size() const { return total_size_; }
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-  virtual HType CalculateInferredType();
-
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast)
-
- private:
-  Handle<JSObject> boilerplate_;
-  int total_size_;
-};
-
-
-class HObjectLiteralGeneric: public HMaterializedLiteral<1> {
- public:
-  HObjectLiteralGeneric(HValue* context,
-                        Handle<FixedArray> constant_properties,
-                        bool fast_elements,
-                        int literal_index,
+class HFastLiteral: public HMaterializedLiteral<1> {
+ public:
+  HFastLiteral(HValue* context,
+               Handle<JSObject> boilerplate,
+               int total_size,
+               int literal_index,
+               int depth)
+      : HMaterializedLiteral<1>(literal_index, depth),
+        boilerplate_(boilerplate),
+        total_size_(total_size) {
+    SetOperandAt(0, context);
+  }
+
+  // Maximum depth and total number of elements and properties for literal
+  // graphs to be considered for fast deep-copying.
+  static const int kMaxLiteralDepth = 3;
+  static const int kMaxLiteralProperties = 8;
+
+  HValue* context() { return OperandAt(0); }
+  Handle<JSObject> boilerplate() const { return boilerplate_; }
+  int total_size() const { return total_size_; }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+  virtual HType CalculateInferredType();
+
+  DECLARE_CONCRETE_INSTRUCTION(FastLiteral)
+
+ private:
+  Handle<JSObject> boilerplate_;
+  int total_size_;
+};
+
+
+class HArrayLiteral: public HMaterializedLiteral<1> {
+ public:
+  HArrayLiteral(HValue* context,
+                Handle<HeapObject> boilerplate_object,
+                int length,
+                int literal_index,
+                int depth)
+      : HMaterializedLiteral<1>(literal_index, depth),
+        length_(length),
+        boilerplate_object_(boilerplate_object) {
+    SetOperandAt(0, context);
+  }
+
+  HValue* context() { return OperandAt(0); }
+  ElementsKind boilerplate_elements_kind() const {
+    if (!boilerplate_object_->IsJSObject()) {
+      return FAST_ELEMENTS;
+    }
+    return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
+  }
+  Handle<HeapObject> boilerplate_object() const { return boilerplate_object_; }
+  int length() const { return length_; }
+
+  bool IsCopyOnWrite() const;
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+  virtual HType CalculateInferredType();
+
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)
+
+ private:
+  int length_;
+  Handle<HeapObject> boilerplate_object_;
+};
+
+
+class HObjectLiteral: public HMaterializedLiteral<1> {
+ public:
+  HObjectLiteral(HValue* context,
+                 Handle<FixedArray> constant_properties,
+                 bool fast_elements,
+                 int literal_index,
@@ -4429,7 +4467,7 @@ class HObjectLiteralGeneric: public HMaterializedLiteral<1> {
   }
   virtual HType CalculateInferredType();

-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric)
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral)

  private:
   Handle<FixedArray> constant_properties_;
@@ -4524,7 +4562,7 @@ class HToFastProperties: public HUnaryOperation {
     // This instruction is not marked as having side effects, but
     // changes the map of the input operand. Use it only when creating
     // object literals.
-    ASSERT(value->IsObjectLiteralGeneric() || value->IsObjectLiteralFast());
+    ASSERT(value->IsObjectLiteral() || value->IsFastLiteral());
     set_representation(Representation::Tagged());
   }

@@ -4598,6 +4636,134 @@ class HIn: public HTemplateInstruction<3> {
   DECLARE_CONCRETE_INSTRUCTION(In)
 };

+
+class HCheckMapValue: public HTemplateInstruction<2> {
+ public:
+  HCheckMapValue(HValue* value,
+                 HValue* map) {
+    SetOperandAt(0, value);
+    SetOperandAt(1, map);
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetGVNFlag(kDependsOnMaps);
+    SetGVNFlag(kDependsOnElementsKind);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual HType CalculateInferredType() {
+    return HType::Tagged();
+  }
+
+  HValue* value() { return OperandAt(0); }
+  HValue* map() { return OperandAt(1); }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
+
+ protected:
+  virtual bool DataEquals(HValue* other) {
+    return true;
+  }
+};
+
+
+class HForInPrepareMap : public HTemplateInstruction<2> {
+ public:
+  HForInPrepareMap(HValue* context,
+                   HValue* object) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, object);
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  HValue* context() { return OperandAt(0); }
+  HValue* enumerable() { return OperandAt(1); }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual HType CalculateInferredType() {
+    return HType::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap);
+};
+
+
+class HForInCacheArray : public HTemplateInstruction<2> {
+ public:
+  HForInCacheArray(HValue* enumerable,
+                   HValue* keys,
+                   int idx) : idx_(idx) {
+    SetOperandAt(0, enumerable);
+    SetOperandAt(1, keys);
+    set_representation(Representation::Tagged());
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  HValue* enumerable() { return OperandAt(0); }
+  HValue* map() { return OperandAt(1); }
+  int idx() { return idx_; }
+
+  HForInCacheArray* index_cache() {
+    return index_cache_;
+  }
+  void set_index_cache(HForInCacheArray* index_cache) {
+    index_cache_ = index_cache;
+  }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual HType CalculateInferredType() {
+    return HType::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray);
+
+ private:
+  int idx_;
+  HForInCacheArray* index_cache_;
+};
+
+
+class HLoadFieldByIndex : public HTemplateInstruction<2> {
+ public:
+  HLoadFieldByIndex(HValue* object,
+                    HValue* index) {
+    SetOperandAt(0, object);
+    SetOperandAt(1, index);
+    set_representation(Representation::Tagged());
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  HValue* object() { return OperandAt(0); }
+  HValue* index() { return OperandAt(1); }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual HType CalculateInferredType() {
+    return HType::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex);
+};
+
+
 #undef DECLARE_INSTRUCTION
 #undef DECLARE_CONCRETE_INSTRUCTION

421
deps/v8/src/hydrogen.cc

@@ -446,7 +446,7 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
                        HBasicBlock* dont_visit)
       : visited_count_(0),
         stack_(16),
-        reachable_(block_count),
+        reachable_(block_count, ZONE),
         dont_visit_(dont_visit) {
     PushBlock(entry_block);
     Analyze();
@@ -744,7 +744,7 @@ void HGraph::Canonicalize() {

 void HGraph::OrderBlocks() {
   HPhase phase("Block ordering");
-  BitVector visited(blocks_.length());
+  BitVector visited(blocks_.length(), zone());

   ZoneList<HBasicBlock*> reverse_result(8);
   HBasicBlock* start = blocks_[0];
@@ -955,7 +955,7 @@ void HGraph::CollectPhis() {

 void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
-  BitVector in_worklist(GetMaximumValueID());
+  BitVector in_worklist(GetMaximumValueID(), zone());
   for (int i = 0; i < worklist->length(); ++i) {
     ASSERT(!in_worklist.Contains(worklist->at(i)->id()));
     in_worklist.Add(worklist->at(i)->id());
@@ -1431,7 +1431,8 @@ class HGlobalValueNumberer BASE_EMBEDDED {
   void ProcessLoopBlock(HBasicBlock* block,
                         HBasicBlock* before_loop,
                         GVNFlagSet loop_kills,
-                        GVNFlagSet* accumulated_first_time_depends);
+                        GVNFlagSet* accumulated_first_time_depends,
+                        GVNFlagSet* accumulated_first_time_changes);

   bool AllowCodeMotion();
   bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
@@ -1512,10 +1513,12 @@ void HGlobalValueNumberer::LoopInvariantCodeMotion() {
                side_effects.ToIntegral());

       GVNFlagSet accumulated_first_time_depends;
+      GVNFlagSet accumulated_first_time_changes;
       HBasicBlock* last = block->loop_information()->GetLastBackEdge();
       for (int j = block->block_id(); j <= last->block_id(); ++j) {
         ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects,
-                         &accumulated_first_time_depends);
+                         &accumulated_first_time_depends,
+                         &accumulated_first_time_changes);
       }
     }
   }
@@ -1526,7 +1529,8 @@ void HGlobalValueNumberer::ProcessLoopBlock(
     HBasicBlock* block,
     HBasicBlock* loop_header,
     GVNFlagSet loop_kills,
-    GVNFlagSet* accumulated_first_time_depends) {
+    GVNFlagSet* first_time_depends,
+    GVNFlagSet* first_time_changes) {
   HBasicBlock* pre_header = loop_header->predecessors()->at(0);
   GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
   TraceGVN("Loop invariant motion for B%d depends_flags=0x%x\n",
@@ -1544,28 +1548,47 @@ void HGlobalValueNumberer::ProcessLoopBlock(
              instr->gvn_flags().ToIntegral(),
              depends_flags.ToIntegral());
     bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
-    if (!can_hoist && instr->IsTransitionElementsKind()) {
-      // It's only possible to hoist one time side effects if there are no
-      // dependencies on their changes from the loop header to the current
-      // instruction.
-      GVNFlagSet converted_changes =
-          HValue::ConvertChangesToDependsFlags(instr->ChangesFlags());
-      TraceGVN("Checking dependencies on one-time instruction %d (%s) "
-               "converted changes 0x%X, accumulated depends 0x%X\n",
+    if (instr->IsTransitionElementsKind()) {
+      // It's possible to hoist transitions out of a loop as long as the
+      // hoisting wouldn't move the transition past a DependsOn of one of it's
+      // changes or any instructions that might change an objects map or
+      // elements contents.
+      GVNFlagSet changes = instr->ChangesFlags();
+      GVNFlagSet hoist_depends_blockers =
+          HValue::ConvertChangesToDependsFlags(changes);
+      // In addition to not hoisting transitions above other instructions that
+      // change dependencies that the transition changes, it must not be
+      // hoisted above map changes and stores to an elements backing store
+      // that the transition might change.
+      GVNFlagSet hoist_change_blockers = changes;
+      hoist_change_blockers.Add(kChangesMaps);
+      HTransitionElementsKind* trans = HTransitionElementsKind::cast(instr);
+      if (trans->original_map()->has_fast_double_elements()) {
+        hoist_change_blockers.Add(kChangesDoubleArrayElements);
+      }
+      if (trans->transitioned_map()->has_fast_double_elements()) {
+        hoist_change_blockers.Add(kChangesArrayElements);
+      }
+      TraceGVN("Checking dependencies on HTransitionElementsKind %d (%s) "
+               "hoist depends blockers 0x%X, hoist change blockers 0x%X, "
+               "accumulated depends 0x%X, accumulated changes 0x%X\n",
                instr->id(),
                instr->Mnemonic(),
-               converted_changes.ToIntegral(),
-               accumulated_first_time_depends->ToIntegral());
-      // It's possible to hoist one-time side effects from the current loop
-      // loop only if they dominate all of the successor blocks in the same
-      // loop and there are not any instructions that have Changes/DependsOn
-      // that intervene between it and the beginning of the loop header.
+               hoist_depends_blockers.ToIntegral(),
+               hoist_change_blockers.ToIntegral(),
+               first_time_depends->ToIntegral(),
+               first_time_changes->ToIntegral());
+      // It's possible to hoist transition from the current loop loop only if
+      // they dominate all of the successor blocks in the same loop and there
+      // are not any instructions that have Changes/DependsOn that intervene
+      // between it and the beginning of the loop header.
       bool in_nested_loop = block != loop_header &&
                             ((block->parent_loop_header() != loop_header) ||
                             block->IsLoopHeader());
       can_hoist = !in_nested_loop &&
                   block->IsLoopSuccessorDominator() &&
-                  !accumulated_first_time_depends->ContainsAnyOf(converted_changes);
+                  !first_time_depends->ContainsAnyOf(hoist_depends_blockers) &&
+                  !first_time_changes->ContainsAnyOf(hoist_change_blockers);
     }

     if (can_hoist) {
@@ -1589,10 +1612,8 @@ void HGlobalValueNumberer::ProcessLoopBlock(
     if (!hoisted) {
       // If an instruction is not hoisted, we have to account for its side
      // effects when hoisting later HTransitionElementsKind instructions.
-      accumulated_first_time_depends->Add(instr->DependsOnFlags());
-      GVNFlagSet converted_changes =
-          HValue::ConvertChangesToDependsFlags(instr->SideEffectFlags());
-      accumulated_first_time_depends->Add(converted_changes);
+      first_time_depends->Add(instr->DependsOnFlags());
+      first_time_changes->Add(instr->ChangesFlags());
     }
     instr = next;
   }
@@ -1698,7 +1719,9 @@ void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
 class HInferRepresentation BASE_EMBEDDED {
  public:
   explicit HInferRepresentation(HGraph* graph)
-      : graph_(graph), worklist_(8), in_worklist_(graph->GetMaximumValueID()) {}
+      : graph_(graph),
+        worklist_(8),
+        in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }

   void Analyze();

@@ -1815,7 +1838,7 @@ void HInferRepresentation::Analyze() {
   ZoneList<BitVector*> connected_phis(phi_count);
   for (int i = 0; i < phi_count; ++i) {
     phi_list->at(i)->InitRealUses(i);
-    BitVector* connected_set = new(zone()) BitVector(phi_count);
+    BitVector* connected_set = new(zone()) BitVector(phi_count, graph_->zone());
     connected_set->Add(i);
     connected_phis.Add(connected_set);
   }
@@ -2105,7 +2128,7 @@ void HGraph::MarkDeoptimizeOnUndefined() {

 void HGraph::ComputeMinusZeroChecks() {
-  BitVector visited(GetMaximumValueID());
+  BitVector visited(GetMaximumValueID(), zone());
   for (int i = 0; i < blocks_.length(); ++i) {
     for (HInstruction* current = blocks_[i]->first();
          current != NULL;
@@ -2443,7 +2466,7 @@ HGraph* HGraphBuilder::CreateGraph() {
     // Handle implicit declaration of the function name in named function
     // expressions before other declarations.
     if (scope->is_function_scope() && scope->function() != NULL) {
-      HandleVariableDeclaration(scope->function(), CONST, NULL);
+      HandleVariableDeclaration(scope->function(), CONST, NULL, NULL);
     }
     VisitDeclarations(scope->declarations());
     AddSimulate(AstNode::kDeclarationsId);
@@ -2721,12 +2744,20 @@ void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
 HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
     BreakableStatement* stmt,
-    BreakType type) {
+    BreakType type,
+    int* drop_extra) {
+  *drop_extra = 0;
   BreakAndContinueScope* current = this;
   while (current != NULL && current->info()->target() != stmt) {
+    *drop_extra += current->info()->drop_extra();
     current = current->next();
   }
   ASSERT(current != NULL);  // Always found (unless stack is malformed).
+
+  if (type == BREAK) {
+    *drop_extra += current->info()->drop_extra();
+  }
+
   HBasicBlock* block = NULL;
   switch (type) {
     case BREAK:
@@ -2754,7 +2785,11 @@ void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  HBasicBlock* continue_block = break_scope()->Get(stmt->target(), CONTINUE);
+  int drop_extra = 0;
+  HBasicBlock* continue_block = break_scope()->Get(stmt->target(),
+                                                   CONTINUE,
+                                                   &drop_extra);
+  Drop(drop_extra);
   current_block()->Goto(continue_block);
   set_current_block(NULL);
 }
@@ -2764,7 +2799,11 @@ void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  HBasicBlock* break_block = break_scope()->Get(stmt->target(), BREAK);
+  int drop_extra = 0;
+  HBasicBlock* break_block = break_scope()->Get(stmt->target(),
+                                                BREAK,
+                                                &drop_extra);
+  Drop(drop_extra);
   current_block()->Goto(break_block);
   set_current_block(NULL);
 }
@@ -3019,15 +3058,24 @@ void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
   set_current_block(osr_entry);
   int osr_entry_id = statement->OsrEntryId();
-  // We want the correct environment at the OsrEntry instruction.  Build
-  // it explicitly.  The expression stack should be empty.
-  ASSERT(environment()->ExpressionStackIsEmpty());
-  for (int i = 0; i < environment()->length(); ++i) {
+  int first_expression_index = environment()->first_expression_index();
+  int length = environment()->length();
+  for (int i = 0; i < first_expression_index; ++i) {
     HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
     AddInstruction(osr_value);
     environment()->Bind(i, osr_value);
   }

+  if (first_expression_index != length) {
+    environment()->Drop(length - first_expression_index);
+    for (int i = first_expression_index; i < length; ++i) {
+      HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
+      AddInstruction(osr_value);
+      environment()->Push(osr_value);
+    }
+  }
+
   AddSimulate(osr_entry_id);
   AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
   HContext* context = new(zone()) HContext;
@@ -3125,7 +3173,6 @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
   BreakAndContinueInfo break_info(stmt);
   if (current_block() != NULL) {
-    BreakAndContinueScope push(&break_info, this);
     CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
   }
   HBasicBlock* body_exit =
@@ -3170,7 +3217,6 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
   BreakAndContinueInfo break_info(stmt);
   if (current_block() != NULL) {
-    BreakAndContinueScope push(&break_info, this);
     CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
   }
   HBasicBlock* body_exit =
@@ -3195,7 +3241,110 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  return Bailout("ForInStatement");
+
+  if (!stmt->each()->IsVariableProxy() ||
+      !stmt->each()->AsVariableProxy()->var()->IsStackLocal()) {
+    return Bailout("ForInStatement with non-local each variable");
+  }
+
+  Variable* each_var = stmt->each()->AsVariableProxy()->var();
+
+  CHECK_ALIVE(VisitForValue(stmt->enumerable()));
+  HValue* enumerable = Top();  // Leave enumerable at the top.
+
+  HValue* context = environment()->LookupContext();
+
+  HInstruction* map = AddInstruction(new(zone()) HForInPrepareMap(
+      context, enumerable));
+  AddSimulate(stmt->PrepareId());
+
+  HInstruction* array = AddInstruction(
+      new(zone()) HForInCacheArray(
+          enumerable,
+          map,
+          DescriptorArray::kEnumCacheBridgeCacheIndex));
+
+  HInstruction* array_length = AddInstruction(
+      new(zone()) HFixedArrayBaseLength(array));
+
+  HInstruction* start_index = AddInstruction(new(zone()) HConstant(
+      Handle<Object>(Smi::FromInt(0)), Representation::Integer32()));
+
+  Push(map);
+  Push(array);
+  Push(array_length);
+  Push(start_index);
+
+  HInstruction* index_cache = AddInstruction(
+      new(zone()) HForInCacheArray(
+          enumerable,
+          map,
+          DescriptorArray::kEnumCacheBridgeIndicesCacheIndex));
+  HForInCacheArray::cast(array)->set_index_cache(
+      HForInCacheArray::cast(index_cache));
+
+  PreProcessOsrEntry(stmt);
+  HBasicBlock* loop_entry = CreateLoopHeaderBlock();
+  current_block()->Goto(loop_entry);
+  set_current_block(loop_entry);
+
+  HValue* index = environment()->ExpressionStackAt(0);
+  HValue* limit = environment()->ExpressionStackAt(1);
+
+  // Check that we still have more keys.
+  HCompareIDAndBranch* compare_index =
+      new(zone()) HCompareIDAndBranch(index, limit, Token::LT);
+  compare_index->SetInputRepresentation(Representation::Integer32());
+
+  HBasicBlock* loop_body = graph()->CreateBasicBlock();
+  HBasicBlock* loop_successor = graph()->CreateBasicBlock();
+
+  compare_index->SetSuccessorAt(0, loop_body);
+  compare_index->SetSuccessorAt(1, loop_successor);
+  current_block()->Finish(compare_index);
+
+  set_current_block(loop_successor);
+  Drop(5);
+
+  set_current_block(loop_body);
+
+  HValue* key = AddInstruction(
+      new(zone()) HLoadKeyedFastElement(
+          environment()->ExpressionStackAt(2),  // Enum cache.
+          environment()->ExpressionStackAt(0),  // Iteration index.
+          HLoadKeyedFastElement::OMIT_HOLE_CHECK));
+
+  // Check if the expected map still matches that of the enumerable.
+  // If not just deoptimize.
+  AddInstruction(new(zone()) HCheckMapValue(
+      environment()->ExpressionStackAt(4),
+      environment()->ExpressionStackAt(3)));
+
+  Bind(each_var, key);
+
+  BreakAndContinueInfo break_info(stmt, 5);
+  CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
+
+  HBasicBlock* body_exit =
+      JoinContinue(stmt, current_block(), break_info.continue_block());
+
+  if (body_exit != NULL) {
+    set_current_block(body_exit);
+
+    HValue* current_index = Pop();
+    PushAndAdd(
+        new(zone()) HAdd(context, current_index, graph()->GetConstant1()));
+    body_exit = current_block();
+  }
+
+  HBasicBlock* loop_exit = CreateLoop(stmt,
+                                      loop_entry,
+                                      body_exit,
+                                      loop_successor,
+                                      break_info.break_block());
+
+  set_current_block(loop_exit);
 }
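While the for-in loop is live, the builder keeps five values on the expression stack: the enumerable, its map, the enum cache array, the cache length, and the running index on top. That is why the non-taken branch does Drop(5) and why BreakAndContinueInfo is created with drop_extra = 5, so break paths unwind the same five slots. A toy loop with the same shape (plain C++ standing in for the hydrogen graph):

    #include <cstdio>

    int main() {
      // The five "stack slots", bottom to top, as in the graph builder:
      const char* enumerable = "o";          // ExpressionStackAt(4)
      const char* map = "map(o)";            // ExpressionStackAt(3)
      const char* keys[] = {"a", "b", "c"};  // ExpressionStackAt(2): enum cache
      int limit = 3;                         // ExpressionStackAt(1): length
      int index = 0;                         // ExpressionStackAt(0): index

      while (index < limit) {          // HCompareIDAndBranch(index, limit, LT)
        // HCheckMapValue would re-check map(o) here and deopt on mismatch.
        const char* key = keys[index]; // HLoadKeyedFastElement, no hole check
        printf("%s[%s] (map %s)\n", enumerable, key, map);
        ++index;                       // HAdd(index, constant 1)
      }
      return 0;  // at loop exit all five slots are dropped
    }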
@@ -3437,20 +3586,36 @@ void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
 }


-// Determines whether the given object literal boilerplate satisfies all
-// limits to be considered for fast deep-copying and computes the total
-// size of all objects that are part of the graph.
-static bool IsFastObjectLiteral(Handle<JSObject> boilerplate,
-                                int max_depth,
-                                int* max_properties,
-                                int* total_size) {
-  if (max_depth <= 0) return false;
+// Determines whether the given array or object literal boilerplate satisfies
+// all limits to be considered for fast deep-copying and computes the total
+// size of all objects that are part of the graph.
+static bool IsFastLiteral(Handle<JSObject> boilerplate,
+                          int max_depth,
+                          int* max_properties,
+                          int* total_size) {
+  ASSERT(max_depth >= 0 && *max_properties >= 0);
+  if (max_depth == 0) return false;

   Handle<FixedArrayBase> elements(boilerplate->elements());
   if (elements->length() > 0 &&
-      elements->map() != HEAP->fixed_cow_array_map()) {
-    return false;
+      elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) {
+    if (!boilerplate->HasFastElements()) return false;
+    int length = elements->length();
+    for (int i = 0; i < length; i++) {
+      if ((*max_properties)-- == 0) return false;
+      Handle<Object> value = JSObject::GetElement(boilerplate, i);
+      if (value->IsJSObject()) {
+        Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+        if (!IsFastLiteral(value_object,
+                           max_depth - 1,
+                           max_properties,
+                           total_size)) {
+          return false;
+        }
+      }
+    }
+    *total_size += FixedArray::SizeFor(length);
   }

   Handle<FixedArray> properties(boilerplate->properties());
   if (properties->length() > 0) {
@@ -3458,11 +3623,11 @@ static bool IsFastObjectLiteral(Handle<JSObject> boilerplate,
   } else {
     int nof = boilerplate->map()->inobject_properties();
     for (int i = 0; i < nof; i++) {
-      if ((*max_properties)-- <= 0) return false;
+      if ((*max_properties)-- == 0) return false;
       Handle<Object> value(boilerplate->InObjectPropertyAt(i));
       if (value->IsJSObject()) {
         Handle<JSObject> value_object = Handle<JSObject>::cast(value);
-        if (!IsFastObjectLiteral(value_object,
-                                 max_depth - 1,
-                                 max_properties,
-                                 total_size)) {
+        if (!IsFastLiteral(value_object,
+                           max_depth - 1,
+                           max_properties,
+                           total_size)) {
@@ -3487,21 +3652,21 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
   // Check whether to use fast or slow deep-copying for boilerplate.
   int total_size = 0;
-  int max_properties = HObjectLiteralFast::kMaxObjectLiteralProperties;
+  int max_properties = HFastLiteral::kMaxLiteralProperties;
   Handle<Object> boilerplate(closure->literals()->get(expr->literal_index()));
   if (boilerplate->IsJSObject() &&
-      IsFastObjectLiteral(Handle<JSObject>::cast(boilerplate),
-                          HObjectLiteralFast::kMaxObjectLiteralDepth,
-                          &max_properties,
-                          &total_size)) {
+      IsFastLiteral(Handle<JSObject>::cast(boilerplate),
+                    HFastLiteral::kMaxLiteralDepth,
+                    &max_properties,
+                    &total_size)) {
     Handle<JSObject> boilerplate_object = Handle<JSObject>::cast(boilerplate);
-    literal = new(zone()) HObjectLiteralFast(context,
-                                             boilerplate_object,
-                                             total_size,
-                                             expr->literal_index(),
-                                             expr->depth());
+    literal = new(zone()) HFastLiteral(context,
+                                       boilerplate_object,
+                                       total_size,
+                                       expr->literal_index(),
+                                       expr->depth());
   } else {
-    literal = new(zone()) HObjectLiteralGeneric(context,
-                                                expr->constant_properties(),
-                                                expr->fast_elements(),
-                                                expr->literal_index(),
+    literal = new(zone()) HObjectLiteral(context,
+                                         expr->constant_properties(),
+                                         expr->fast_elements(),
+                                         expr->literal_index(),
@@ -3577,6 +3742,7 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
   HValue* context = environment()->LookupContext();
+  HInstruction* literal;

   Handle<FixedArray> literals(environment()->closure()->literals());
   Handle<Object> raw_boilerplate(literals->get(expr->literal_index()));
@@ -3598,12 +3764,25 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
   ElementsKind boilerplate_elements_kind =
         Handle<JSObject>::cast(boilerplate)->GetElementsKind();

-  HArrayLiteral* literal = new(zone()) HArrayLiteral(
-      context,
-      boilerplate,
-      length,
-      expr->literal_index(),
-      expr->depth());
+  // Check whether to use fast or slow deep-copying for boilerplate.
+  int total_size = 0;
+  int max_properties = HFastLiteral::kMaxLiteralProperties;
+  if (IsFastLiteral(boilerplate,
+                    HFastLiteral::kMaxLiteralDepth,
+                    &max_properties,
+                    &total_size)) {
+    literal = new(zone()) HFastLiteral(context,
+                                       boilerplate,
+                                       total_size,
+                                       expr->literal_index(),
+                                       expr->depth());
+  } else {
+    literal = new(zone()) HArrayLiteral(context,
+                                        boilerplate,
+                                        length,
+                                        expr->literal_index(),
+                                        expr->depth());
+  }

   // The array is expected in the bailout environment during computation
   // of the property values and is the value of the entire expression.
@@ -4454,7 +4633,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
     Handle<Map> map = maps->at(i);
     ASSERT(map->IsMap());
     if (!transition_target.at(i).is_null()) {
-      object = AddInstruction(new(zone()) HTransitionElementsKind(
+      AddInstruction(new(zone()) HTransitionElementsKind(
           object, map, transition_target.at(i)));
     } else {
       type_todo[map->elements_kind()] = true;
@@ -4917,7 +5096,7 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
     TraceInline(target, caller, "target not inlineable");
     return false;
   }
-  if (target_shared->dont_inline() || target_shared->dont_crankshaft()) {
+  if (target_shared->dont_inline() || target_shared->dont_optimize()) {
     TraceInline(target, caller, "target contains unsupported syntax [early]");
     return false;
   }
@@ -4979,7 +5158,7 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
     if (target_info.isolate()->has_pending_exception()) {
       // Parse or scope error, never optimize this function.
       SetStackOverflow();
-      target_shared->DisableOptimization(*target);
+      target_shared->DisableOptimization();
     }
     TraceInline(target, caller, "parse failure");
     return false;
@@ -5092,7 +5271,7 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
     // Bail out if the inline function did, as we cannot residualize a call
     // instead.
     TraceInline(target, caller, "inline graph construction failed");
-    target_shared->DisableOptimization(*target);
+    target_shared->DisableOptimization();
     inline_bailout_ = true;
     delete target_state;
     return true;
@@ -5173,7 +5352,6 @@ bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
   BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
   switch (id) {
     case kMathRound:
-    case kMathFloor:
     case kMathAbs:
     case kMathSqrt:
     case kMathLog:
@@ -5311,32 +5489,43 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
       AddCheckConstantFunction(expr, receiver, receiver_map, true);
       HValue* right = Pop();
       HValue* left = Pop();
-      // Do not inline if the return representation is not certain.
-      if (!left->representation().Equals(right->representation())) {
-        Push(left);
-        Push(right);
-        return false;
+      Pop();  // Pop receiver.
+
+      HValue* left_operand = left;
+      HValue* right_operand = right;
+
+      // If we do not have two integers, we convert to double for comparison.
+      if (!left->representation().IsInteger32() ||
+          !right->representation().IsInteger32()) {
+        if (!left->representation().IsDouble()) {
+          HChange* left_convert = new(zone()) HChange(
+              left,
+              Representation::Double(),
+              false,  // Do not truncate when converting to double.
+              true);  // Deoptimize for undefined.
+          left_convert->SetFlag(HValue::kBailoutOnMinusZero);
+          left_operand = AddInstruction(left_convert);
+        }
+        if (!right->representation().IsDouble()) {
+          HChange* right_convert = new(zone()) HChange(
+              right,
+              Representation::Double(),
+              false,  // Do not truncate when converting to double.
+              true);  // Deoptimize for undefined.
+          right_convert->SetFlag(HValue::kBailoutOnMinusZero);
+          right_operand = AddInstruction(right_convert);
+        }
       }
-      Pop();  // Pop receiver.
+
+      ASSERT(left_operand->representation().Equals(
+          right_operand->representation()));
+      ASSERT(!left_operand->representation().IsTagged());
+
       Token::Value op = (id == kMathMin) ? Token::LT : Token::GT;
-      HCompareIDAndBranch* compare = NULL;
-
-      if (left->representation().IsTagged()) {
-        HChange* left_cvt =
-            new(zone()) HChange(left, Representation::Double(), false, true);
-        left_cvt->SetFlag(HValue::kBailoutOnMinusZero);
-        AddInstruction(left_cvt);
-        HChange* right_cvt =
-            new(zone()) HChange(right, Representation::Double(), false, true);
-        right_cvt->SetFlag(HValue::kBailoutOnMinusZero);
-        AddInstruction(right_cvt);
-        compare = new(zone()) HCompareIDAndBranch(left_cvt, right_cvt, op);
-        compare->SetInputRepresentation(Representation::Double());
-      } else {
-        compare = new(zone()) HCompareIDAndBranch(left, right, op);
-        compare->SetInputRepresentation(left->representation());
-      }
+
+      HCompareIDAndBranch* compare =
+          new(zone()) HCompareIDAndBranch(left_operand, right_operand, op);
+      compare->SetInputRepresentation(left_operand->representation());

       HBasicBlock* return_left = graph()->CreateBasicBlock();
       HBasicBlock* return_right = graph()->CreateBasicBlock();
@@ -6541,26 +6730,81 @@ void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {

 void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
-  HandleVariableDeclaration(decl->proxy(), decl->mode(), decl->fun());
+  UNREACHABLE();
+}
+
+
+void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+  int length = declarations->length();
+  int global_count = 0;
+  for (int i = 0; i < declarations->length(); i++) {
+    VariableDeclaration* decl = declarations->at(i)->AsVariableDeclaration();
+    if (decl == NULL) continue;
+    HandleVariableDeclaration(decl->proxy(),
+                              decl->mode(),
+                              decl->fun(),
+                              &global_count);
+  }
+
+  // Batch declare global functions and variables.
+  if (global_count > 0) {
+    Handle<FixedArray> array =
+        isolate()->factory()->NewFixedArray(2 * global_count, TENURED);
+    for (int j = 0, i = 0; i < length; i++) {
+      VariableDeclaration* decl = declarations->at(i)->AsVariableDeclaration();
+      if (decl == NULL) continue;
+      Variable* var = decl->proxy()->var();
+      if (var->IsUnallocated()) {
+        array->set(j++, *(var->name()));
+        if (decl->fun() == NULL) {
+          if (var->binding_needs_init()) {
+            // In case this binding needs initialization use the hole.
+            array->set_the_hole(j++);
+          } else {
+            array->set_undefined(j++);
+          }
+        } else {
+          Handle<SharedFunctionInfo> function =
+              Compiler::BuildFunctionInfo(decl->fun(), info()->script());
+          // Check for stack-overflow exception.
+          if (function.is_null()) {
+            SetStackOverflow();
+            return;
+          }
+          array->set(j++, *function);
+        }
+      }
+    }
+    int flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
+                DeclareGlobalsNativeFlag::encode(info()->is_native()) |
+                DeclareGlobalsLanguageMode::encode(info()->language_mode());
+    HInstruction* result =
+        new(zone()) HDeclareGlobals(environment()->LookupContext(),
+                                    array,
+                                    flags);
+    AddInstruction(result);
+  }
 }


 void HGraphBuilder::HandleVariableDeclaration(VariableProxy* proxy,
                                               VariableMode mode,
-                                              FunctionLiteral* function) {
+                                              FunctionLiteral* function,
+                                              int* global_count) {
   Variable* var = proxy->var();
   bool binding_needs_init =
       (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (var->location()) {
     case Variable::UNALLOCATED:
-      return Bailout("unsupported global declaration");
+      ++(*global_count);
+      return;
     case Variable::PARAMETER:
     case Variable::LOCAL:
     case Variable::CONTEXT:
       if (binding_needs_init || function != NULL) {
         HValue* value = NULL;
         if (function != NULL) {
-          VisitForValue(function);
+          CHECK_ALIVE(VisitForValue(function));
           value = Pop();
         } else {
           value = graph()->GetConstantHole();
@@ -7211,9 +7455,8 @@ bool HEnvironment::HasExpressionAt(int index) const {
 bool HEnvironment::ExpressionStackIsEmpty() const {
-  int first_expression = parameter_count() + specials_count() + local_count();
-  ASSERT(length() >= first_expression);
-  return length() == first_expression;
+  ASSERT(length() >= first_expression_index());
+  return length() == first_expression_index();
 }
@@ -7501,7 +7744,7 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type) {
   PrintIndent();
   trace_.Add("%d %s", range->id(), type);
   if (range->HasRegisterAssigned()) {
-    LOperand* op = range->CreateAssignedOperand();
+    LOperand* op = range->CreateAssignedOperand(ZONE);
     int assigned_reg = op->index();
     if (op->IsDoubleRegister()) {
       trace_.Add(" \"%s\"",

21
deps/v8/src/hydrogen.h

@@ -399,6 +399,10 @@ class HEnvironment: public ZoneObject {
     return i >= parameter_count() && i < parameter_count() + specials_count();
   }

+  int first_expression_index() const {
+    return parameter_count() + specials_count() + local_count();
+  }
+
   void Bind(Variable* variable, HValue* value) {
     Bind(IndexFor(variable), value);
   }
@@ -705,8 +709,12 @@ class HGraphBuilder: public AstVisitor {
   // can have a separate lifetime.
   class BreakAndContinueInfo BASE_EMBEDDED {
    public:
-    explicit BreakAndContinueInfo(BreakableStatement* target)
-        : target_(target), break_block_(NULL), continue_block_(NULL) {
+    explicit BreakAndContinueInfo(BreakableStatement* target,
+                                  int drop_extra = 0)
+        : target_(target),
+          break_block_(NULL),
+          continue_block_(NULL),
+          drop_extra_(drop_extra) {
     }

     BreakableStatement* target() { return target_; }
@@ -714,11 +722,13 @@ class HGraphBuilder: public AstVisitor {
     void set_break_block(HBasicBlock* block) { break_block_ = block; }
     HBasicBlock* continue_block() { return continue_block_; }
     void set_continue_block(HBasicBlock* block) { continue_block_ = block; }
+    int drop_extra() { return drop_extra_; }

    private:
     BreakableStatement* target_;
     HBasicBlock* break_block_;
     HBasicBlock* continue_block_;
+    int drop_extra_;
   };
   // A helper class to maintain a stack of current BreakAndContinueInfo
@@ -737,7 +747,7 @@ class HGraphBuilder: public AstVisitor {
     BreakAndContinueScope* next() { return next_; }

     // Search the break stack for a break or continue target.
-    HBasicBlock* Get(BreakableStatement* stmt, BreakType type);
+    HBasicBlock* Get(BreakableStatement* stmt, BreakType type, int* drop_extra);

    private:
     BreakAndContinueInfo* info_;
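The new drop_extra plumbing lets Get report how many extra stack slots a break or continue must pop when it jumps out across constructs (such as for-in) that keep state on the expression stack. A simplified C++ model of that lookup (names illustrative, not V8's exact code):

    struct BreakableStatement;  // opaque AST node

    struct BreakInfo {
      BreakableStatement* target;
      int drop_extra;   // slots a construct (e.g. for-in) keeps on the stack
      BreakInfo* next;  // enclosing break/continue scope
    };

    // Walk outward to the requested target, summing the extra slots of
    // every scope crossed on the way; the jump then pops that many values.
    int DropExtraFor(BreakInfo* top, BreakableStatement* stmt) {
      int drop = 0;
      for (BreakInfo* info = top; info != nullptr; info = info->next) {
        if (info->target == stmt) break;
        drop += info->drop_extra;
      }
      return drop;
    }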
@@ -780,6 +790,8 @@ class HGraphBuilder: public AstVisitor {
   FunctionState* function_state() const { return function_state_; }

+  void VisitDeclarations(ZoneList<Declaration*>* declarations);
+
  private:
   // Type of a member function that generates inline code for a native function.
   typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
@@ -841,7 +853,8 @@ class HGraphBuilder: public AstVisitor {
   void HandleVariableDeclaration(VariableProxy* proxy,
                                  VariableMode mode,
-                                 FunctionLiteral* function);
+                                 FunctionLiteral* function,
+                                 int* global_count);

   void VisitDelete(UnaryOperation* expr);
   void VisitVoid(UnaryOperation* expr);

18
deps/v8/src/ia32/assembler-ia32.cc

@@ -32,7 +32,7 @@
 // The original source code covered by the above license above has been modified
 // significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.

 #include "v8.h"
@@ -575,7 +575,7 @@ void Assembler::leave() {
 void Assembler::mov_b(Register dst, const Operand& src) {
-  ASSERT(dst.code() < 4);
+  CHECK(dst.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x8A);
   emit_operand(dst, src);
@@ -591,7 +591,7 @@ void Assembler::mov_b(const Operand& dst, int8_t imm8) {
 void Assembler::mov_b(const Operand& dst, Register src) {
-  ASSERT(src.code() < 4);
+  CHECK(src.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x88);
   emit_operand(src, dst);
@@ -829,7 +829,7 @@ void Assembler::cmpb(const Operand& op, int8_t imm8) {
 void Assembler::cmpb(const Operand& op, Register reg) {
-  ASSERT(reg.is_byte_register());
+  CHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x38);
   emit_operand(reg, op);
@@ -837,7 +837,7 @@ void Assembler::cmpb(const Operand& op, Register reg) {
 void Assembler::cmpb(Register reg, const Operand& op) {
-  ASSERT(reg.is_byte_register());
+  CHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x3A);
   emit_operand(reg, op);
@@ -901,6 +901,7 @@ void Assembler::cmpw_ax(const Operand& op) {
 void Assembler::dec_b(Register dst) {
+  CHECK(dst.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0xFE);
   EMIT(0xC8 | dst.code());
@@ -1174,7 +1175,9 @@ void Assembler::test(Register reg, const Immediate& imm) {
   EnsureSpace ensure_space(this);
   // Only use test against byte for registers that have a byte
   // variant: eax, ebx, ecx, and edx.
-  if (imm.rmode_ == RelocInfo::NONE && is_uint8(imm.x_) && reg.code() < 4) {
+  if (imm.rmode_ == RelocInfo::NONE &&
+      is_uint8(imm.x_) &&
+      reg.is_byte_register()) {
     uint8_t imm8 = imm.x_;
     if (reg.is(eax)) {
       EMIT(0xA8);
@@ -1204,6 +1207,7 @@ void Assembler::test(Register reg, const Operand& op) {
 void Assembler::test_b(Register reg, const Operand& op) {
+  CHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x84);
   emit_operand(reg, op);
@@ -1219,7 +1223,7 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
 void Assembler::test_b(const Operand& op, uint8_t imm8) {
-  if (op.is_reg_only() && op.reg().code() >= 4) {
+  if (op.is_reg_only() && !op.reg().is_byte_register()) {
     test(op, Immediate(imm8));
     return;
   }
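Background for these changes: on ia32 only eax, ecx, edx and ebx have 8-bit forms (al, cl, dl, bl), which is exactly the registers with encodings 0 to 3, so `reg.code() < 4` and `is_byte_register()` agree. A minimal sketch of the predicate (illustrative; V8's real Register type differs):

    #include <cassert>

    struct Register {
      int code;  // ia32 encoding: eax=0, ecx=1, edx=2, ebx=3, esp=4, ...
      // Only encodings 0-3 have an 8-bit form (al, cl, dl, bl).
      bool is_byte_register() const { return code <= 3; }
    };

    int main() {
      Register eax = { 0 };
      Register esi = { 6 };
      assert(eax.is_byte_register());
      assert(!esi.is_byte_register());
      // CHECK (always compiled in) rather than ASSERT (debug only) turns
      // a wrong-encoding bug into a hard failure in release builds too.
      return 0;
    }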

42
deps/v8/src/ia32/builtins-ia32.cc

@@ -1088,7 +1088,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
                             bool construct_call,
                             Label* call_generic_code) {
   Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
-      empty_array, not_empty_array;
+      empty_array, not_empty_array, finish, cant_transition_map, not_double;

   // Push the constructor and argc. No need to tag argc as a smi, as there will
   // be no garbage collection with this on the stack.
@@ -1247,6 +1247,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
   // esp[8]: constructor (only if construct_call)
   // esp[12]: return address
   // esp[16]: last argument
+  __ bind(&finish);
   __ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
   __ pop(eax);
   __ pop(ebx);
@@ -1255,9 +1256,43 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ jmp(ecx);

   __ bind(&has_non_smi_element);
+  // Double values are handled by the runtime.
+  __ CheckMap(eax,
+              masm->isolate()->factory()->heap_number_map(),
+              &not_double,
+              DONT_DO_SMI_CHECK);
+  __ bind(&cant_transition_map);
   // Throw away the array that's only been partially constructed.
   __ pop(eax);
   __ UndoAllocationInNewSpace(eax);
+  __ jmp(&prepare_generic_code_call);
+
+  __ bind(&not_double);
+  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+  __ mov(ebx, Operand(esp, 0));
+  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ LoadTransitionedArrayMapConditional(
+      FAST_SMI_ONLY_ELEMENTS,
+      FAST_ELEMENTS,
+      edi,
+      eax,
+      &cant_transition_map);
+  __ mov(FieldOperand(ebx, HeapObject::kMapOffset), edi);
+  __ RecordWriteField(ebx, HeapObject::kMapOffset, edi, eax,
+                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+  // Prepare to re-enter the loop
+  __ lea(edi, Operand(esp, last_arg_offset));
+
+  // Finish the array initialization loop.
+  Label loop2;
+  __ bind(&loop2);
+  __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
+  __ mov(Operand(edx, 0), eax);
+  __ add(edx, Immediate(kPointerSize));
+  __ dec(ecx);
+  __ j(greater_equal, &loop2);
+  __ jmp(&finish);

   // Restore argc and constructor before running the generic code.
   __ bind(&prepare_generic_code_call);
@@ -1659,8 +1694,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
   __ j(not_equal, &skip, Label::kNear);
   __ ret(0);

-  // If we decide not to perform on-stack replacement we perform a
-  // stack guard check to enable interrupts.
+  // Insert a stack guard check so that if we decide not to perform
+  // on-stack replacement right away, the function calling this stub can
+  // still be interrupted.
   __ bind(&stack_check);
   Label ok;
   ExternalReference stack_limit =
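Taken together, the new labels in ArrayNativeCode implement this control flow: while copying constructor arguments into a FAST_SMI_ONLY_ELEMENTS array, the first non-smi value either aborts to generic code (heap numbers, since doubles need a different representation) or triggers a one-time map transition to FAST_ELEMENTS, after which loop2 copies the remaining values unchecked. A plain C++ model of that decision (a model, not the stub itself):

    #include <cstddef>
    #include <vector>

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };
    enum ValueKind { SMI, HEAP_NUMBER, OTHER_HEAP_OBJECT };

    struct Outcome {
      bool use_generic_code;  // bail out: let the runtime build the array
      ElementsKind kind;      // elements kind the array ends up with
    };

    // Smis stay on the smi-only fast path; the first heap number aborts
    // to generic code; any other heap object transitions the map once,
    // after which the rest of the arguments are copied unchecked.
    Outcome FillArray(const std::vector<ValueKind>& args) {
      for (std::size_t i = 0; i < args.size(); ++i) {
        if (args[i] == SMI) continue;
        if (args[i] == HEAP_NUMBER) return {true, FAST_SMI_ONLY_ELEMENTS};
        return {false, FAST_ELEMENTS};  // not_double path: transition + loop2
      }
      return {false, FAST_SMI_ONLY_ELEMENTS};  // all smis
    }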

39
deps/v8/src/ia32/code-stubs-ia32.cc

@@ -3922,7 +3922,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ Throw(eax);

   __ bind(&throw_termination_exception);
-  __ ThrowUncatchable(TERMINATION, eax);
+  __ ThrowUncatchable(eax);

   __ bind(&failure);
   // For failure to match, return null.
@@ -4573,6 +4573,11 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
 }

+void InterruptStub::Generate(MacroAssembler* masm) {
+  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
 static void GenerateRecordCallTarget(MacroAssembler* masm) {
   // Cache the called function in a global property cell. Cache states
   // are uninitialized, monomorphic (indicated by a JSFunction), and
@@ -4780,11 +4785,6 @@ void CEntryStub::GenerateAheadOfTime() {
 }

-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
-  __ Throw(eax);
-}
-
 void CEntryStub::GenerateCore(MacroAssembler* masm,
                               Label* throw_normal_exception,
                               Label* throw_termination_exception,
@@ -4903,12 +4903,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
 }

-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
-                                          UncatchableExceptionType type) {
-  __ ThrowUncatchable(type, eax);
-}
-
 void CEntryStub::Generate(MacroAssembler* masm) {
   // eax: number of arguments including receiver
   // ebx: pointer to C function (C callee-saved)
@@ -4962,13 +4956,24 @@ void CEntryStub::Generate(MacroAssembler* masm) {
                true);

   __ bind(&throw_out_of_memory_exception);
-  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+  // Set external caught exception to false.
+  Isolate* isolate = masm->isolate();
+  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+                                    isolate);
+  __ mov(Operand::StaticVariable(external_caught), Immediate(false));
+
+  // Set pending exception and eax to out of memory exception.
+  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
+                                      isolate);
+  __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+  __ mov(Operand::StaticVariable(pending_exception), eax);
+  // Fall through to the next label.

   __ bind(&throw_termination_exception);
-  GenerateThrowUncatchable(masm, TERMINATION);
+  __ ThrowUncatchable(eax);

   __ bind(&throw_normal_exception);
-  GenerateThrowTOS(masm);
+  __ Throw(eax);
 }
@@ -7041,11 +7046,13 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   // KeyedStoreIC::GenerateGeneric.
   { ebx, edx, ecx, EMIT_REMEMBERED_SET},
   // KeyedStoreStubCompiler::GenerateStoreFastElement.
-  { edi, edx, ecx, EMIT_REMEMBERED_SET},
+  { edi, ebx, ecx, EMIT_REMEMBERED_SET},
+  { edx, edi, ebx, EMIT_REMEMBERED_SET},
   // ElementsTransitionGenerator::GenerateSmiOnlyToObject
   // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { edx, ebx, edi, EMIT_REMEMBERED_SET},
+  { edx, ebx, edi, OMIT_REMEMBERED_SET},
   // ElementsTransitionGenerator::GenerateDoubleToObject
   { eax, edx, esi, EMIT_REMEMBERED_SET},
   { edx, eax, edi, EMIT_REMEMBERED_SET},
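In plain terms, the inlined out-of-memory path above records two pieces of isolate state and then reuses the uncatchable-throw machinery. A small C++ model (hypothetical types, not V8's Isolate API):

    struct IsolateState {
      bool external_caught_exception;
      const void* pending_exception;
    };

    static const int kOutOfMemorySentinel = 0;  // stands in for the OOM Failure

    // Mirrors the inlined path: clear the externally-caught flag, record
    // the out-of-memory failure as the pending exception, then fall
    // through to the uncatchable throw that unwinds to the top handler.
    void RaiseOutOfMemory(IsolateState* isolate) {
      isolate->external_caught_exception = false;
      isolate->pending_exception = &kOutOfMemorySentinel;
    }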

49
deps/v8/src/ia32/codegen-ia32.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -301,11 +301,17 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label loop, entry, convert_hole, gc_required;
+  Label loop, entry, convert_hole, gc_required, only_change_map;
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+  __ j(equal, &only_change_map);
+
   __ push(eax);
   __ push(ebx);

-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
   __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));

   // Allocate new FixedDoubleArray.
@@ -399,6 +405,11 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
   __ pop(ebx);
   __ pop(eax);
+
+  // Restore esi.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  __ bind(&only_change_map);
   // eax: value
   // ebx: target map
   // Set transitioned map.
@@ -408,10 +419,8 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
                       ebx,
                       edi,
                       kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
+                      OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
-  // Restore esi.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
 }
@@ -424,12 +433,18 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label loop, entry, convert_hole, gc_required;
+  Label loop, entry, convert_hole, gc_required, only_change_map, success;
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
+  __ j(equal, &only_change_map);
+
   __ push(eax);
   __ push(edx);
   __ push(ebx);

-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
   __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));

   // Allocate new FixedArray.
@@ -446,6 +461,20 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   __ jmp(&entry);

+  // ebx: target map
+  // edx: receiver
+  // Set transitioned map.
+  __ bind(&only_change_map);
+  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
+  __ RecordWriteField(edx,
+                      HeapObject::kMapOffset,
+                      ebx,
+                      edi,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ jmp(&success);
+
   // Call into runtime if GC is required.
   __ bind(&gc_required);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -507,7 +536,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
                       ebx,
                       edi,
                       kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
+                      OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   // Replace receiver's backing store with newly created and filled FixedArray.
   __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
@@ -522,6 +551,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   // Restore registers.
   __ pop(eax);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  __ bind(&success);
 }
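The only_change_map fast path added in both generators rests on one observation: when the backing store is the canonical empty fixed array, there is nothing to convert, so the elements transition reduces to a single map store. A sketch of the idea (hypothetical object model, not V8 structures):

    struct Map;  // elements kind is a property of the map
    struct FixedArrayBase { int length; };
    struct JSObject { Map* map; FixedArrayBase* elements; };

    static FixedArrayBase kEmptyBacking = { 0 };
    FixedArrayBase* const kEmptyFixedArray = &kEmptyBacking;  // one canonical copy

    // With the canonical empty backing store there is nothing to convert,
    // so the transition degenerates to a map store.
    bool TransitionElementsFast(JSObject* obj, Map* target_map) {
      if (obj->elements == kEmptyFixedArray) {
        obj->map = target_map;  // the only_change_map path
        return true;
      }
      return false;  // slow path: allocate and convert the backing store
    }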

41
deps/v8/src/ia32/deoptimizer-ia32.cc

@@ -205,6 +205,15 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
 }

+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x11;
+static const byte kJaeInstruction = 0x73;
+static const byte kJaeOffset = 0x07;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
 void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
                                         Address pc_after,
                                         Code* check_code,
@@ -228,11 +237,17 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
   //     call <on-stack replacment>
   //     test eax, <loop nesting depth>
   // ok:
-  ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
-         *(call_target_address - 2) == 0x07 &&  // offset
-         *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x66;  // 2 byte nop part 1
-  *(call_target_address - 2) = 0x90;  // 2 byte nop part 2
+  if (FLAG_count_based_interrupts) {
+    ASSERT(*(call_target_address - 3) == kJnsInstruction);
+    ASSERT(*(call_target_address - 2) == kJnsOffset);
+  } else {
+    ASSERT(*(call_target_address - 3) == kJaeInstruction);
+    ASSERT(*(call_target_address - 2) == kJaeOffset);
+  }
+  ASSERT(*(call_target_address - 1) == kCallInstruction);
+  *(call_target_address - 3) = kNopByteOne;
+  *(call_target_address - 2) = kNopByteTwo;
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
@@ -248,13 +263,19 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
   Address call_target_address = pc_after - kIntSize;
   ASSERT(replacement_code->entry() ==
          Assembler::target_address_at(call_target_address));
+
   // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
   // restore the conditional branch.
-  ASSERT(*(call_target_address - 3) == 0x66 &&  // 2 byte nop part 1
-         *(call_target_address - 2) == 0x90 &&  // 2 byte nop part 2
-         *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x73;  // jae
-  *(call_target_address - 2) = 0x07;  // offset
+  ASSERT(*(call_target_address - 3) == kNopByteOne &&
+         *(call_target_address - 2) == kNopByteTwo &&
+         *(call_target_address - 1) == kCallInstruction);
+  if (FLAG_count_based_interrupts) {
+    *(call_target_address - 3) = kJnsInstruction;
+    *(call_target_address - 2) = kJnsOffset;
+  } else {
+    *(call_target_address - 3) = kJaeInstruction;
+    *(call_target_address - 2) = kJaeOffset;
+  }
   Assembler::set_target_address_at(call_target_address,
                                    check_code->entry());
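What the patch/revert pair does to the instruction stream, modeled on a plain byte buffer: the two-byte conditional jump that normally skips the check call is overwritten with a two-byte nop, making the call unconditional, and later restored. A self-contained C++ sketch (same byte values as above; not V8's actual functions):

    #include <cassert>
    #include <cstdint>

    const std::uint8_t kJnsInstruction = 0x79, kJnsOffset = 0x11;
    const std::uint8_t kJaeInstruction = 0x73, kJaeOffset = 0x07;
    const std::uint8_t kCallInstruction = 0xe8;
    const std::uint8_t kNopByteOne = 0x66, kNopByteTwo = 0x90;

    // code points at the two-byte conditional jump (jns with count-based
    // interrupts, jae otherwise) that normally skips the following call.
    void PatchStackCheck(std::uint8_t* code, bool count_based) {
      assert(code[0] == (count_based ? kJnsInstruction : kJaeInstruction));
      assert(code[1] == (count_based ? kJnsOffset : kJaeOffset));
      assert(code[2] == kCallInstruction);
      code[0] = kNopByteOne;   // 2-byte nop: the call now always executes
      code[1] = kNopByteTwo;
    }

    void RevertStackCheck(std::uint8_t* code, bool count_based) {
      assert(code[0] == kNopByteOne && code[1] == kNopByteTwo);
      code[0] = count_based ? kJnsInstruction : kJaeInstruction;
      code[1] = count_based ? kJnsOffset : kJaeOffset;
    }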

177
deps/v8/src/ia32/full-codegen-ia32.cc

@@ -113,12 +113,12 @@ class JumpPatchSite BASE_EMBEDDED {
 //
 // The function builds a JS frame. Please see JavaScriptFrameConstants in
 // frames-ia32.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
-  ASSERT(info_ == NULL);
-  info_ = info;
-  scope_ = info->scope();
+void FullCodeGenerator::Generate() {
+  CompilationInfo* info = info_;
   handler_table_ =
       isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+  profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -132,7 +132,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
   // We can optionally optimize based on counters rather than statistical
   // sampling.
   if (info->ShouldSelfOptimize()) {
-    if (FLAG_trace_opt) {
+    if (FLAG_trace_opt_verbose) {
       PrintF("[adding self-optimization header to %s]\n",
              *info->function()->debug_name()->ToCString());
     }
@@ -323,15 +323,34 @@ void FullCodeGenerator::ClearAccumulator() {
 }

-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+                                       Label* back_edge_target) {
   Comment cmnt(masm_, "[ Stack check");
   Label ok;
+
+  if (FLAG_count_based_interrupts) {
+    int weight = 1;
+    if (FLAG_weighted_back_edges) {
+      ASSERT(back_edge_target->is_bound());
+      int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+      weight = Min(127, Max(1, distance / 100));
+    }
+    __ sub(Operand::Cell(profiling_counter_), Immediate(Smi::FromInt(weight)));
+    __ j(positive, &ok, Label::kNear);
+    InterruptStub stub;
+    __ CallStub(&stub);
+  } else {
+    // Count based interrupts happen often enough when they are enabled
+    // that the additional stack checks are not necessary (they would
+    // only check for interrupts).
     ExternalReference stack_limit =
         ExternalReference::address_of_stack_limit(isolate());
     __ cmp(esp, Operand::StaticVariable(stack_limit));
     __ j(above_equal, &ok, Label::kNear);
     StackCheckStub stub;
     __ CallStub(&stub);
+  }
+
   // Record a mapping of this PC offset to the OSR id. This is used to find
   // the AST id from the unoptimized code in order to use it as a key into
   // the deoptimization input data found in the optimized code.
@@ -344,6 +363,12 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
   ASSERT(loop_depth() > 0);
   __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));

+  if (FLAG_count_based_interrupts) {
+    // Reset the countdown.
+    __ mov(Operand::Cell(profiling_counter_),
+           Immediate(Smi::FromInt(FLAG_interrupt_budget)));
+  }
+
   __ bind(&ok);
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
   // Record a mapping of the OSR id to this PC. This is used if the OSR
@@ -364,6 +389,26 @@ void FullCodeGenerator::EmitReturnSequence() {
     __ push(eax);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
+  if (FLAG_interrupt_at_exit) {
+    // Pretend that the exit is a backwards jump to the entry.
+    int weight = 1;
+    if (FLAG_weighted_back_edges) {
+      int distance = masm_->pc_offset();
+      weight = Min(127, Max(1, distance / 100));
+    }
+    __ sub(Operand::Cell(profiling_counter_),
+           Immediate(Smi::FromInt(weight)));
+    Label ok;
+    __ j(positive, &ok, Label::kNear);
+    __ push(eax);
+    InterruptStub stub;
+    __ CallStub(&stub);
+    __ pop(eax);
+    // Reset the countdown.
+    __ mov(Operand::Cell(profiling_counter_),
+           Immediate(Smi::FromInt(FLAG_interrupt_budget)));
+    __ bind(&ok);
+  }
 #ifdef DEBUG
   // Add a label for checking the size of the code used for returning.
   Label check_exit_codesize;
@@ -851,7 +896,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+    CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
     patch_site.EmitPatchInfo();
     __ test(eax, eax);
     __ j(not_equal, &next_test);
@@ -900,6 +945,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   __ cmp(eax, isolate()->factory()->null_value());
   __ j(equal, &exit);

+  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
   // Convert the object to a JS object.
   Label convert, done_convert;
   __ JumpIfSmi(eax, &convert, Label::kNear);
@@ -912,7 +959,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   __ push(eax);

   // Check for proxies.
-  Label call_runtime;
+  Label call_runtime, use_cache, fixed_array;
   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
   __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
   __ j(below_equal, &call_runtime);
@@ -921,61 +968,19 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  Label next;
-  __ mov(ecx, eax);
-  __ bind(&next);
-
-  // Check that there are no elements. Register ecx contains the
-  // current JS object we've reached through the prototype chain.
-  __ cmp(FieldOperand(ecx, JSObject::kElementsOffset),
-         isolate()->factory()->empty_fixed_array());
-  __ j(not_equal, &call_runtime);
-
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache. Leave the map in ebx for the subsequent
-  // prototype load.
-  __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
-  __ JumpIfSmi(edx, &call_runtime);
-
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors (edx). This is the case if the next enumeration
-  // index field does not contain a smi.
-  __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
-  __ JumpIfSmi(edx, &call_runtime);
-
-  // For all objects but the receiver, check that the cache is empty.
-  Label check_prototype;
-  __ cmp(ecx, eax);
-  __ j(equal, &check_prototype, Label::kNear);
-  __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  __ cmp(edx, isolate()->factory()->empty_fixed_array());
-  __ j(not_equal, &call_runtime);
-
-  // Load the prototype from the map and loop if non-null.
-  __ bind(&check_prototype);
-  __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
-  __ cmp(ecx, isolate()->factory()->null_value());
-  __ j(not_equal, &next);
-
-  // The enum cache is valid. Load the map of the object being
-  // iterated over and use the cache for the iteration.
-  Label use_cache;
+  __ CheckEnumCache(&call_runtime);
+
   __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
   __ jmp(&use_cache, Label::kNear);

   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
-  __ push(eax);  // Duplicate the enumerable object on the stack.
+  __ push(eax);
   __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);

-  // If we got a map from the runtime call, we can do a fast
-  // modification check. Otherwise, we got a fixed array, and we have
-  // to do a slow check.
-  Label fixed_array;
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
          isolate()->factory()->meta_map());
-  __ j(not_equal, &fixed_array, Label::kNear);
+  __ j(not_equal, &fixed_array);

   // We got a map in register eax. Get the enumeration cache from it.
   __ bind(&use_cache);
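The deleted prototype-chain walk, now factored into MacroAssembler::CheckEnumCache and also reused by the Crankshaft code below, implemented roughly the following predicate. A C++ model for orientation (simplified field names, not V8 structures):

    struct JSObject;
    struct DescriptorArray {
      bool has_enum_cache;    // enumeration index slot is not a smi
      bool enum_cache_empty;  // cached bridge array is the empty array
    };
    struct Map { DescriptorArray* descriptors; JSObject* prototype; };
    struct JSObject { Map* map; bool elements_empty; };

    // For-in may use the receiver map's enum cache only if every object
    // on the prototype chain has no elements and a populated enum cache,
    // and every object except the receiver has an empty cache.
    bool CanUseEnumCache(JSObject* receiver) {
      for (JSObject* o = receiver; o != nullptr; o = o->map->prototype) {
        if (!o->elements_empty) return false;
        DescriptorArray* desc = o->map->descriptors;
        if (desc == nullptr || !desc->has_enum_cache) return false;
        if (o != receiver && !desc->enum_cache_empty) return false;
      }
      return true;
    }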
@@ -1008,6 +1013,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   __ push(Immediate(Smi::FromInt(0)));  // Initial index.

   // Generate code for doing the condition check.
+  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
   __ mov(eax, Operand(esp, 0 * kPointerSize));  // Get the current index.
   __ cmp(eax, Operand(esp, 1 * kPointerSize));  // Compare to the array length.
@@ -1050,7 +1056,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   __ mov(result_register(), ebx);
   // Perform the assignment as if via '='.
   { EffectContext context(this);
-    EmitAssignment(stmt->each(), stmt->AssignmentId());
+    EmitAssignment(stmt->each());
   }

   // Generate code for the body of the loop.
@@ -1061,7 +1067,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   __ bind(loop_statement.continue_label());
   __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));

-  EmitStackCheck(stmt);
+  EmitStackCheck(stmt, &loop);
   __ jmp(&loop);

   // Remove the pointers stored on the stack.
__ add(esp, Immediate(5 * kPointerSize)); __ add(esp, Immediate(5 * kPointerSize));
// Exit and decrement the loop depth. // Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
__ bind(&exit); __ bind(&exit);
decrement_loop_depth(); decrement_loop_depth();
} }
@@ -1164,7 +1171,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
-  __ call(ic, mode);
+  CallIC(ic, mode);
 }
@@ -1245,7 +1252,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
       __ mov(eax, GlobalObjectOperand());
       __ mov(ecx, var->name());
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+      CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
       context()->Plug(eax);
       break;
     }
@@ -1445,7 +1452,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
           Handle<Code> ic = is_classic_mode()
               ? isolate()->builtins()->StoreIC_Initialize()
               : isolate()->builtins()->StoreIC_Initialize_Strict();
-          __ call(ic, RelocInfo::CODE_TARGET, key->id());
+          CallIC(ic, RelocInfo::CODE_TARGET, key->id());
           PrepareForBailoutForId(key->id(), NO_REGISTERS);
         } else {
           VisitForEffect(value);
@@ -1472,7 +1479,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
                          Smi::FromInt(1) :
                          Smi::FromInt(0)));
         VisitForStackValue(value);
-        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        __ push(Immediate(Smi::FromInt(NONE)));
+        __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
         break;
       default: UNREACHABLE();
     }
@@ -1709,14 +1717,14 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   ASSERT(!key->handle()->IsSmi());
   __ mov(ecx, Immediate(key->handle()));
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
 }

 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
 }
@@ -1737,7 +1745,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
   __ bind(&stub_call);
   __ mov(eax, ecx);
   BinaryOpStub stub(op, mode);
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   __ jmp(&done, Label::kNear);
@@ -1822,13 +1830,13 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
   __ pop(edx);
   BinaryOpStub stub(op, mode);
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   context()->Plug(eax);
 }

-void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
@@ -1863,7 +1871,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->StoreIC_Initialize()
          : isolate()->builtins()->StoreIC_Initialize_Strict();
-      __ call(ic);
+      CallIC(ic);
       break;
     }
     case KEYED_PROPERTY: {
@@ -1876,11 +1884,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize()
          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-      __ call(ic);
+      CallIC(ic);
       break;
     }
   }
-  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
   context()->Plug(eax);
 }
@@ -1894,7 +1901,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
     Handle<Code> ic = is_classic_mode()
         ? isolate()->builtins()->StoreIC_Initialize()
         : isolate()->builtins()->StoreIC_Initialize_Strict();
-    __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);

   } else if (op == Token::INIT_CONST) {
     // Const initializers need a write barrier.
@@ -2003,7 +2010,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
   Handle<Code> ic = is_classic_mode()
       ? isolate()->builtins()->StoreIC_Initialize()
       : isolate()->builtins()->StoreIC_Initialize_Strict();
-  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());

   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2043,7 +2050,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   Handle<Code> ic = is_classic_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize()
       : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());

   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2077,6 +2084,16 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
 }

+void FullCodeGenerator::CallIC(Handle<Code> code,
+                               RelocInfo::Mode rmode,
+                               unsigned ast_id) {
+  ic_total_count_++;
+  __ call(code, rmode, ast_id);
+}
+
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> name,
                                        RelocInfo::Mode mode) {
@@ -2093,7 +2110,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
   SetSourcePosition(expr->position());
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-  __ call(ic, mode, expr->id());
+  CallIC(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2125,7 +2142,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
   __ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize));  // Key.
-  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -3712,7 +3729,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
     RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
     Handle<Code> ic =
         isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-    __ call(ic, mode, expr->id());
+    CallIC(ic, mode, expr->id());
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3870,7 +3887,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
   // accumulator register eax.
   VisitForAccumulatorValue(expr->expression());
   SetSourcePosition(expr->position());
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   context()->Plug(eax);
 }
@@ -3990,7 +4007,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   __ mov(edx, eax);
   __ mov(eax, Immediate(Smi::FromInt(1)));
   BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
@@ -4024,7 +4041,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
       Handle<Code> ic = is_classic_mode()
          ? isolate()->builtins()->StoreIC_Initialize()
          : isolate()->builtins()->StoreIC_Initialize_Strict();
-      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -4041,7 +4058,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
       Handle<Code> ic = is_classic_mode()
          ? isolate()->builtins()->KeyedStoreIC_Initialize()
          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         // Result is on the stack
@@ -4069,7 +4086,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    __ call(ic);
+    CallIC(ic);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(eax);
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -4249,7 +4266,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
+      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();

       PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);

6
deps/v8/src/ia32/ic-ia32.cc

@@ -1639,6 +1639,9 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
   __ pop(ebx);
   __ push(edx);
   __ push(ebx);  // return address
+  // Leaving the code managed by the register allocator and return to the
+  // convention of using esi as context register.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
 }
@@ -1662,6 +1665,9 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
   __ pop(ebx);
   __ push(edx);
   __ push(ebx);  // return address
+  // Leaving the code managed by the register allocator and return to the
+  // convention of using esi as context register.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
 }

161
deps/v8/src/ia32/lithium-codegen-ia32.cc

@@ -1868,10 +1868,9 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
     // Faster code path to avoid two compares: subtract lower bound from the
     // actual type and do a signed compare with the width of the type range.
     __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
-    __ mov(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
+    __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
     __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    __ cmpb(Operand(temp2),
-            static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
-                                FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                                     FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
     __ j(above, is_false);
   }
@@ -2690,6 +2689,15 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
 }

+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  ASSERT(ToRegister(instr->InputAt(0)).is(esi));
+  __ push(esi);  // The context is the first argument.
+  __ push(Immediate(instr->hydrogen()->pairs()));
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
+  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -4070,7 +4078,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   } else {
     __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
     __ and_(temp, mask);
-    __ cmpb(Operand(temp), tag);
+    __ cmp(temp, tag);
     DeoptimizeIf(not_equal, instr->environment());
   }
 }
@@ -4251,26 +4259,35 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
     __ Assert(equal, "Unexpected object literal boilerplate");
   }

+  // Only elements backing stores for non-COW arrays need to be copied.
+  Handle<FixedArrayBase> elements(object->elements());
+  bool has_elements = elements->length() > 0 &&
+      elements->map() != isolate()->heap()->fixed_cow_array_map();
+
   // Increase the offset so that subsequent objects end up right after
-  // this one.
-  int current_offset = *offset;
-  int size = object->map()->instance_size();
-  *offset += size;
+  // this object and its backing store.
+  int object_offset = *offset;
+  int object_size = object->map()->instance_size();
+  int elements_offset = *offset + object_size;
+  int elements_size = has_elements ? elements->Size() : 0;
+  *offset += object_size + elements_size;

   // Copy object header.
   ASSERT(object->properties()->length() == 0);
-  ASSERT(object->elements()->length() == 0 ||
-         object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
   int inobject_properties = object->map()->inobject_properties();
-  int header_size = size - inobject_properties * kPointerSize;
+  int header_size = object_size - inobject_properties * kPointerSize;
   for (int i = 0; i < header_size; i += kPointerSize) {
+    if (has_elements && i == JSObject::kElementsOffset) {
+      __ lea(ecx, Operand(result, elements_offset));
+    } else {
       __ mov(ecx, FieldOperand(source, i));
-    __ mov(FieldOperand(result, current_offset + i), ecx);
+    }
+    __ mov(FieldOperand(result, object_offset + i), ecx);
   }

   // Copy in-object properties.
   for (int i = 0; i < inobject_properties; i++) {
-    int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
     Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
     if (value->IsJSObject()) {
       Handle<JSObject> value_object = Handle<JSObject>::cast(value);
@@ -4285,10 +4302,40 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
       __ mov(FieldOperand(result, total_offset), Immediate(value));
     }
   }
+
+  // Copy elements backing store header.
+  ASSERT(!has_elements || elements->IsFixedArray());
+  if (has_elements) {
+    __ LoadHeapObject(source, elements);
+    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
+      __ mov(ecx, FieldOperand(source, i));
+      __ mov(FieldOperand(result, elements_offset + i), ecx);
+    }
+  }
+
+  // Copy elements backing store content.
+  ASSERT(!has_elements || elements->IsFixedArray());
+  int elements_length = has_elements ? elements->length() : 0;
+  for (int i = 0; i < elements_length; i++) {
+    int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+    Handle<Object> value = JSObject::GetElement(object, i);
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      __ lea(ecx, Operand(result, *offset));
+      __ mov(FieldOperand(result, total_offset), ecx);
+      __ LoadHeapObject(source, value_object);
+      EmitDeepCopy(value_object, result, source, offset);
+    } else if (value->IsHeapObject()) {
+      __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
+      __ mov(FieldOperand(result, total_offset), ecx);
+    } else {
+      __ mov(FieldOperand(result, total_offset), Immediate(value));
+    }
+  }
 }
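The offset bookkeeping in the rewritten EmitDeepCopy places each object and its elements store back to back in the pre-allocated block, bumping the shared *offset past both so that nested literals copied later land after them. The arithmetic in isolation (a model of the scheme above):

    // Layout inside the single pre-allocated block:
    // [object header + in-object fields][elements backing store][children...]
    struct CopyOffsets {
      int object_offset;    // where this object's header starts
      int elements_offset;  // where its elements store starts (if any)
    };

    // Reserve space for one object plus its (possibly zero-sized)
    // elements store, advancing the shared offset past both.
    CopyOffsets ReserveFor(int* offset, int object_size, int elements_size) {
      CopyOffsets r;
      r.object_offset = *offset;
      r.elements_offset = *offset + object_size;
      *offset += object_size + elements_size;
      return r;
    }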
-void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
+void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   int size = instr->hydrogen()->total_size();

@@ -4310,14 +4357,14 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
 }

-void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
+  Handle<FixedArray> literals(instr->environment()->closure()->literals());
   Handle<FixedArray> constant_properties =
       instr->hydrogen()->constant_properties();

   // Set up the parameters to the stub/runtime call.
-  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
+  __ PushHeapObject(literals);
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
   __ push(Immediate(constant_properties));
   int flags = instr->hydrogen()->fast_elements()
@@ -4414,7 +4461,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
    __ push(Immediate(shared_info));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
-   __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+   __ push(esi);
    __ push(Immediate(shared_info));
    __ push(Immediate(pretenure
        ? factory()->true_value()
@@ -4682,6 +4729,84 @@ void LCodeGen::DoIn(LIn* instr) {
}
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ cmp(eax, isolate()->factory()->undefined_value());
DeoptimizeIf(equal, instr->environment());
__ cmp(eax, isolate()->factory()->null_value());
DeoptimizeIf(equal, instr->environment());
__ test(eax, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
DeoptimizeIf(below_equal, instr->environment());
Label use_cache, call_runtime;
__ CheckEnumCache(&call_runtime);
__ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
__ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(eax);
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
DeoptimizeIf(not_equal, instr->environment());
__ bind(&use_cache);
}
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
__ LoadInstanceDescriptors(map, result);
__ mov(result,
FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
__ mov(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ test(result, result);
DeoptimizeIf(equal, instr->environment());
}
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(not_equal, instr->environment());
}
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
Label out_of_object, done;
__ cmp(index, Immediate(0));
__ j(less, &out_of_object);
__ mov(object, FieldOperand(object,
index,
times_half_pointer_size,
JSObject::kHeaderSize));
__ jmp(&done, Label::kNear);
__ bind(&out_of_object);
__ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
__ neg(index);
// Index is now equal to out of object property index plus 1.
__ mov(object, FieldOperand(object,
index,
times_half_pointer_size,
FixedArray::kHeaderSize - kPointerSize));
__ bind(&done);
}
#undef __

} }  // namespace v8::internal
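The handlers above are the ia32 half of Crankshaft's new fast for-in support: DoForInPrepareMap deoptimizes for null, undefined, smi and proxy enumerables and only falls back to Runtime::kGetPropertyNamesFast when the enum cache cannot be used, while DoLoadFieldByIndex reads in-object fields for non-negative indexes and the out-of-object properties array for negative ones. As a hedged illustration (not part of the diff), this is the loop shape the fast path is built for:

  // Illustrative only: plain objects with fast own properties can walk the
  // map's enum cache; null/undefined enumerables or proxies deoptimize to
  // the generic path.
  function sumValues(o) {
    var total = 0;
    for (var key in o) {   // keys come straight from the enum cache
      total += o[key];     // each read can use load-field-by-index
    }
    return total;
  }
  console.log(sumValues({ a: 1, b: 2, c: 3 }));  // 6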

48
deps/v8/src/ia32/lithium-ia32.cc

@@ -1150,6 +1150,12 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
}
LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
}
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
  LOperand* context = UseRegisterAtStart(instr->value());
  return DefineAsRegister(new(zone()) LGlobalObject(context));
@@ -2206,25 +2212,24 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
  LOperand* context = UseFixed(instr->context(), esi);
  return MarkAsCall(
-     DefineFixed(new(zone()) LArrayLiteral(context), eax), instr);
+     DefineFixed(new(zone()) LFastLiteral(context), eax), instr);
}

-LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
  LOperand* context = UseFixed(instr->context(), esi);
  return MarkAsCall(
-     DefineFixed(new(zone()) LObjectLiteralFast(context), eax), instr);
+     DefineFixed(new(zone()) LArrayLiteral(context), eax), instr);
}

-LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
-    HObjectLiteralGeneric* instr) {
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
  LOperand* context = UseFixed(instr->context(), esi);
  return MarkAsCall(
-     DefineFixed(new(zone()) LObjectLiteralGeneric(context), eax), instr);
+     DefineFixed(new(zone()) LObjectLiteral(context), eax), instr);
}
@@ -2403,6 +2408,35 @@ LInstruction* LChunkBuilder::DoIn(HIn* instr) {
}
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->enumerable(), eax);
LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
LOperand* map = UseRegister(instr->map());
return AssignEnvironment(DefineAsRegister(
new(zone()) LForInCacheArray(map)));
}
LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* map = UseRegisterAtStart(instr->map());
return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
}
LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
LOperand* object = UseRegister(instr->object());
LOperand* index = UseTempRegister(instr->index());
return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
}
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32
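Reading the renames together: HFastLiteral/LFastLiteral now covers literals that can be materialized by the one-pass deep copy in EmitDeepCopy above, while LArrayLiteral and LObjectLiteral remain the stub/runtime fallbacks. A hedged JavaScript illustration (an assumption about the shape of candidates, not a statement of the exact heuristics):

  // Illustrative only: a nested literal of fixed shape with constant
  // elements is a candidate for the fast deep-copy path...
  function makeConfig() {
    return { retries: 3, backoff: [100, 200, 400] };
  }
  // ...while a literal that needs a computed value goes through the
  // generic object-literal stub or the runtime.
  function makeEntry(now) {
    return { stamp: now, tags: [] };
  }
  console.log(makeConfig().backoff[2]);          // 400
  console.log(makeEntry(Date.now()).stamp > 0);  // true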

104
deps/v8/src/ia32/lithium-ia32.h

@@ -81,11 +81,13 @@ class LCodeGen;
  V(ConstantI) \
  V(ConstantT) \
  V(Context) \
+ V(DeclareGlobals) \
  V(DeleteProperty) \
  V(Deoptimize) \
  V(DivI) \
  V(DoubleToI) \
  V(ElementsKind) \
+ V(FastLiteral) \
  V(FixedArrayBaseLength) \
  V(FunctionLiteral) \
  V(GetCachedArrayIndex) \
@@ -129,8 +131,7 @@ class LCodeGen;
  V(NumberTagD) \
  V(NumberTagI) \
  V(NumberUntagD) \
- V(ObjectLiteralFast) \
- V(ObjectLiteralGeneric) \
+ V(ObjectLiteral) \
  V(OsrEntry) \
  V(OuterContext) \
  V(Parameter) \
@@ -166,7 +167,11 @@ class LCodeGen;
  V(TypeofIsAndBranch) \
  V(UnaryMathOperation) \
  V(UnknownOSRValue) \
- V(ValueOf)
+ V(ValueOf) \
+ V(ForInPrepareMap) \
+ V(ForInCacheArray) \
+ V(CheckMapValue) \
+ V(LoadFieldByIndex)

#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@@ -1385,6 +1390,17 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
};
class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> {
public:
explicit LDeclareGlobals(LOperand* context) {
inputs_[0] = context;
}
DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
};
class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LGlobalObject(LOperand* context) {
@@ -1979,42 +1995,42 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
-class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
+class LFastLiteral: public LTemplateInstruction<1, 1, 0> {
 public:
- explicit LArrayLiteral(LOperand* context) {
+ explicit LFastLiteral(LOperand* context) {
    inputs_[0] = context;
  }

  LOperand* context() { return inputs_[0]; }

- DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
- DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
+ DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
};

-class LObjectLiteralFast: public LTemplateInstruction<1, 1, 0> {
+class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
 public:
- explicit LObjectLiteralFast(LOperand* context) {
+ explicit LArrayLiteral(LOperand* context) {
    inputs_[0] = context;
  }

  LOperand* context() { return inputs_[0]; }

- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+ DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+ DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
};

-class LObjectLiteralGeneric: public LTemplateInstruction<1, 1, 0> {
+class LObjectLiteral: public LTemplateInstruction<1, 1, 0> {
 public:
- explicit LObjectLiteralGeneric(LOperand* context) {
+ explicit LObjectLiteral(LOperand* context) {
    inputs_[0] = context;
  }

  LOperand* context() { return inputs_[0]; }

- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
};
@@ -2156,6 +2172,64 @@ class LIn: public LTemplateInstruction<1, 3, 0> {
};
class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> {
public:
LForInPrepareMap(LOperand* context, LOperand* object) {
inputs_[0] = context;
inputs_[1] = object;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
};
class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
}
LOperand* map() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
int idx() {
return HForInCacheArray::cast(this->hydrogen_value())->idx();
}
};
class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
inputs_[1] = map;
}
LOperand* value() { return inputs_[0]; }
LOperand* map() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
};
class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
inputs_[1] = index;
}
LOperand* object() { return inputs_[0]; }
LOperand* index() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
};
class LChunkBuilder;

class LChunk: public ZoneObject {
 public:

57
deps/v8/src/ia32/macro-assembler-ia32.cc

@@ -862,8 +862,7 @@ void MacroAssembler::Throw(Register value) {
}

-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
-                                      Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
  // Adjust this code if not the case.
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
@@ -873,21 +872,9 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

  // The exception is expected in eax.
- if (type == OUT_OF_MEMORY) {
-   // Set external caught exception to false.
-   ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
-                                     isolate());
-   mov(Operand::StaticVariable(external_caught), Immediate(false));
-
-   // Set pending exception and eax to out of memory exception.
-   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
-                                       isolate());
-   mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
-   mov(Operand::StaticVariable(pending_exception), eax);
- } else if (!value.is(eax)) {
+ if (!value.is(eax)) {
    mov(eax, value);
  }
  // Drop the stack pointer to the top of the top stack handler.
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
  mov(esp, Operand::StaticVariable(handler_address));
@@ -2789,6 +2776,46 @@ void MacroAssembler::EnsureNotWhite(
  bind(&done);
}
void MacroAssembler::CheckEnumCache(Label* call_runtime) {
Label next;
mov(ecx, eax);
bind(&next);
// Check that there are no elements. Register ecx contains the
// current JS object we've reached through the prototype chain.
cmp(FieldOperand(ecx, JSObject::kElementsOffset),
isolate()->factory()->empty_fixed_array());
j(not_equal, call_runtime);
// Check that instance descriptors are not empty so that we can
// check for an enum cache. Leave the map in ebx for the subsequent
// prototype load.
mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
JumpIfSmi(edx, call_runtime);
// Check that there is an enum cache in the non-empty instance
// descriptors (edx). This is the case if the next enumeration
// index field does not contain a smi.
mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
JumpIfSmi(edx, call_runtime);
// For all objects but the receiver, check that the cache is empty.
Label check_prototype;
cmp(ecx, eax);
j(equal, &check_prototype, Label::kNear);
mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
cmp(edx, isolate()->factory()->empty_fixed_array());
j(not_equal, call_runtime);
// Load the prototype from the map and loop if non-null.
bind(&check_prototype);
mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
cmp(ecx, isolate()->factory()->null_value());
j(not_equal, &next);
}
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32
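A rough JavaScript model of the CheckEnumCache walk above may help; it is illustrative only, and hasEnumCache() stands in for the descriptor-array and enum-cache checks that are not observable from script:

  function canUseEnumCache(receiver, hasEnumCache) {
    for (var o = receiver; o !== null; o = Object.getPrototypeOf(o)) {
      // Any elements on the chain force the runtime path.
      var names = Object.getOwnPropertyNames(o);
      for (var i = 0; i < names.length; i++) {
        if (String(names[i] >>> 0) === names[i]) return false;
      }
      // The object's map must carry a usable enum cache.
      if (!hasEnumCache(o)) return false;
      // Objects past the receiver must contribute no enumerable keys.
      if (o !== receiver && Object.keys(o).length > 0) return false;
    }
    return true;  // Walked to a null prototype: the caches can be used.
  }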

9
deps/v8/src/ia32/macro-assembler-ia32.h

@@ -496,10 +496,11 @@ class MacroAssembler: public Assembler {
  // Unlink the stack handler on top of the stack from the try handler chain.
  void PopTryHandler();

- // Activate the top handler in the try handler chain.
+ // Throw to the top handler in the try handler chain.
  void Throw(Register value);

- void ThrowUncatchable(UncatchableExceptionType type, Register value);
+ // Throw past all JS frames to the top JS entry frame.
+ void ThrowUncatchable(Register value);
  // ---------------------------------------------------------------------------
  // Inline caching support
@@ -828,6 +829,10 @@ class MacroAssembler: public Assembler {
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);
// Expects object in eax and returns map with validated enum cache
// in eax. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
 private:
  bool generating_stub_;
  bool allow_stub_calls_;

199
deps/v8/src/ia32/stub-cache-ia32.cc

@@ -2591,7 +2591,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
  ElementsKind elements_kind = receiver_map->elements_kind();
  bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
  Handle<Code> stub =
-     KeyedStoreElementStub(is_jsarray, elements_kind).GetCode();
+     KeyedStoreElementStub(is_jsarray, elements_kind, grow_mode_).GetCode();

  __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
@@ -3718,14 +3718,16 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
void KeyedStoreStubCompiler::GenerateStoreFastElement(
    MacroAssembler* masm,
    bool is_js_array,
-   ElementsKind elements_kind) {
+   ElementsKind elements_kind,
+   KeyedAccessGrowMode grow_mode) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, grow, slow, transition_elements_kind;
+ Label check_capacity, prepare_slow, finish_store, commit_backing_store;

  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.
@@ -3733,24 +3735,32 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
  // Check that the key is a smi.
  __ JumpIfNotSmi(ecx, &miss_force_generic);

+ if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+   __ JumpIfNotSmi(eax, &transition_elements_kind);
+ }

  // Get the elements array and make sure it is a fast element array, not 'cow'.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
-        Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ j(not_equal, &miss_force_generic);

  if (is_js_array) {
    // Check that the key is within bounds.
    __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // smis.
+   if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+     __ j(above_equal, &grow);
+   } else {
      __ j(above_equal, &miss_force_generic);
+   }
  } else {
    // Check that the key is within bounds.
    __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));  // smis.
    __ j(above_equal, &miss_force_generic);
  }

+ __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+        Immediate(masm->isolate()->factory()->fixed_array_map()));
+ __ j(not_equal, &miss_force_generic);
+
+ __ bind(&finish_store);
+
  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
-   __ JumpIfNotSmi(eax, &transition_elements_kind);
    // ecx is a smi, use times_half_pointer_size instead of
    // times_pointer_size
    __ mov(FieldOperand(edi,
@@ -3768,8 +3778,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
                       FixedArray::kHeaderSize));
    __ mov(Operand(ecx, 0), eax);

    // Make sure to preserve the value in register eax.
-   __ mov(edx, eax);
-   __ RecordWrite(edi, ecx, edx, kDontSaveFPRegs);
+   __ mov(ebx, eax);
+   __ RecordWrite(edi, ecx, ebx, kDontSaveFPRegs);
  }

  // Done.
@@ -3785,19 +3795,94 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
  __ bind(&transition_elements_kind);
  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
// Handle transition requiring the array to grow.
__ bind(&grow);
// Make sure the array is only growing by a single element, anything else
// must be handled by the runtime. Flags are already set by previous
// compare.
__ j(not_equal, &miss_force_generic);
// Check for the empty array, and preallocate a small backing store if
// possible.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
__ j(not_equal, &check_capacity);
int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
__ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
// Restore the key, which is known to be the array length.
// eax: value
// ecx: key
// edx: receiver
// edi: elements
// Make sure that the backing store can hold additional elements.
__ mov(FieldOperand(edi, JSObject::kMapOffset),
Immediate(masm->isolate()->factory()->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset),
Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
__ mov(ebx, Immediate(masm->isolate()->factory()->the_hole_value()));
for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
__ mov(FieldOperand(edi, FixedArray::SizeFor(i)), ebx);
}
// Store the element at index zero.
__ mov(FieldOperand(edi, FixedArray::SizeFor(0)), eax);
// Install the new backing store in the JSArray.
__ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
__ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Increment the length of the array.
__ mov(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ ret(0);
__ bind(&check_capacity);
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
Immediate(masm->isolate()->factory()->fixed_cow_array_map()));
__ j(equal, &miss_force_generic);
// eax: value
// ecx: key
// edx: receiver
// edi: elements
// Make sure that the backing store can hold additional elements.
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Grow the array and finish the store.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ jmp(&finish_store);
__ bind(&prepare_slow);
// Restore the key, which is known to be the array length.
__ mov(ecx, Immediate(0));
__ bind(&slow);
Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
__ jmp(ic_slow, RelocInfo::CODE_TARGET);
}
}

void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
    MacroAssembler* masm,
-   bool is_js_array) {
+   bool is_js_array,
+   KeyedAccessGrowMode grow_mode) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
- Label miss_force_generic, transition_elements_kind;
+ Label miss_force_generic, transition_elements_kind, grow, slow;
+ Label check_capacity, prepare_slow, finish_store, commit_backing_store;

  // This stub is meant to be tail-jumped to, the receiver must already
  // have been verified by the caller to not be a smi.
@@ -3812,19 +3897,20 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
  if (is_js_array) {
    // Check that the key is within bounds.
    __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // smis.
+   if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+     __ j(above_equal, &grow);
+   } else {
+     __ j(above_equal, &miss_force_generic);
+   }
  } else {
    // Check that the key is within bounds.
    __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));  // smis.
+   __ j(above_equal, &miss_force_generic);
  }
- __ j(above_equal, &miss_force_generic);

- __ StoreNumberToDoubleElements(eax,
-                                edi,
-                                ecx,
-                                edx,
-                                xmm0,
-                                &transition_elements_kind,
-                                true);
+ __ bind(&finish_store);
+ __ StoreNumberToDoubleElements(eax, edi, ecx, edx, xmm0,
+                                &transition_elements_kind, true);
  __ ret(0);
  // Handle store cache miss, replacing the ic with the generic stub.
@@ -3837,6 +3923,79 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
  __ bind(&transition_elements_kind);
  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
// Handle transition requiring the array to grow.
__ bind(&grow);
// Make sure the array is only growing by a single element, anything else
// must be handled by the runtime. Flags are already set by previous
// compare.
__ j(not_equal, &miss_force_generic);
// Transition on values that can't be stored in a FixedDoubleArray.
Label value_is_smi;
__ JumpIfSmi(eax, &value_is_smi);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
__ j(not_equal, &transition_elements_kind);
__ bind(&value_is_smi);
// Check for the empty array, and preallocate a small backing store if
// possible.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
__ j(not_equal, &check_capacity);
int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
__ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
// Restore the key, which is known to be the array length.
__ mov(ecx, Immediate(0));
// eax: value
// ecx: key
// edx: receiver
// edi: elements
    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
    // efficiency, they are guaranteed to be initialized before use.
__ mov(FieldOperand(edi, JSObject::kMapOffset),
Immediate(masm->isolate()->factory()->fixed_double_array_map()));
__ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset),
Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
// Install the new backing store in the JSArray.
__ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
__ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Increment the length of the array.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&finish_store);
__ bind(&check_capacity);
// eax: value
// ecx: key
// edx: receiver
// edi: elements
// Make sure that the backing store can hold additional elements.
__ cmp(ecx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
__ j(above_equal, &slow);
// Grow the array and finish the store.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ jmp(&finish_store);
__ bind(&prepare_slow);
// Restore the key, which is known to be the array length.
__ mov(ecx, Immediate(0));
__ bind(&slow);
Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
__ jmp(ic_slow, RelocInfo::CODE_TARGET);
}
}
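What the new grow path buys, as a hedged JavaScript illustration (not part of the diff): the stub only handles stores that extend a JSArray by exactly one element, preallocating a small backing store when the array starts out empty; anything else still goes to the runtime.

  var a = [1, 2, 3];
  a[a.length] = 4;       // key == length: grow by one, handled in the stub
  a[a.length + 5] = 10;  // skips ahead and leaves holes: not a single-step
                         // grow, so it is punted to the runtime
  console.log(a.length); // 10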

7
deps/v8/src/ic-inl.h

@@ -79,19 +79,20 @@ Code* IC::GetTargetAtAddress(Address address) {
void IC::SetTargetAtAddress(Address address, Code* target) {
  ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub());
+ Code* old_target = GetTargetAtAddress(address);
#ifdef DEBUG
  // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
  // ICs as strict mode. The strict-ness of the IC must be preserved.
- Code* old_target = GetTargetAtAddress(address);
  if (old_target->kind() == Code::STORE_IC ||
      old_target->kind() == Code::KEYED_STORE_IC) {
-   ASSERT(old_target->extra_ic_state() == target->extra_ic_state());
+   ASSERT(Code::GetStrictMode(old_target->extra_ic_state()) ==
+          Code::GetStrictMode(target->extra_ic_state()));
  }
#endif
  Assembler::set_target_address_at(address, target->instruction_start());
  target->GetHeap()->incremental_marking()->RecordCodeTargetPatch(address,
                                                                  target);
- PostPatching();
+ PostPatching(address, target, old_target);
}

197
deps/v8/src/ic.cc

@@ -81,9 +81,13 @@ void IC::TraceIC(const char* type,
    }
  }
  JavaScriptFrame::PrintTop(stdout, false, true);
+ bool new_can_grow =
+     Code::GetKeyedAccessGrowMode(new_target->extra_ic_state()) ==
+     ALLOW_JSARRAY_GROWTH;
- PrintF(" (%c->%c)",
+ PrintF(" (%c->%c%s)",
         TransitionMarkFromState(old_state),
-        TransitionMarkFromState(new_state));
+        TransitionMarkFromState(new_state),
+        new_can_grow ? ".GROW" : "");
  name->Print();
  PrintF("]\n");
}
@@ -292,7 +296,32 @@ Failure* IC::ReferenceError(const char* type, Handle<String> name) {
}

-void IC::PostPatching() {
+void IC::PostPatching(Address address, Code* target, Code* old_target) {
if (FLAG_type_info_threshold > 0) {
if (old_target->is_inline_cache_stub() &&
target->is_inline_cache_stub()) {
State old_state = old_target->ic_state();
State new_state = target->ic_state();
bool was_uninitialized =
old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
bool is_uninitialized =
new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
int delta = 0;
if (was_uninitialized && !is_uninitialized) {
delta = 1;
} else if (!was_uninitialized && is_uninitialized) {
delta = -1;
}
if (delta != 0) {
Code* host = target->GetHeap()->isolate()->
inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
TypeFeedbackInfo* info =
TypeFeedbackInfo::cast(host->type_feedback_info());
info->set_ic_with_typeinfo_count(
info->ic_with_typeinfo_count() + delta);
}
}
}
  if (FLAG_watch_ic_patching) {
    Isolate::Current()->runtime_profiler()->NotifyICChanged();
    // We do not want to optimize until the ICs have settled down,
@@ -309,7 +338,9 @@ void IC::PostPatching() {
    if (raw_frame->is_java_script()) {
      JSFunction* function =
          JSFunction::cast(JavaScriptFrame::cast(raw_frame)->function());
-     function->shared()->set_profiler_ticks(0);
+     if (function->IsOptimized()) continue;
+     SharedFunctionInfo* shared = function->shared();
+     shared->set_profiler_ticks(0);
    }
    it.Advance();
  }
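A rough model of the new type-feedback bookkeeping added above, illustrative only: an IC that leaves the uninitialized states gains type info for its host code object, and one that falls back to them loses it.

  function typeInfoDelta(oldState, newState) {
    var wasUninit = oldState === 'UNINITIALIZED' || oldState === 'PREMONOMORPHIC';
    var isUninit = newState === 'UNINITIALIZED' || newState === 'PREMONOMORPHIC';
    if (wasUninit && !isUninit) return 1;   // the IC learned something
    if (!wasUninit && isUninit) return -1;  // the IC was cleared
    return 0;
  }
  console.log(typeInfoDelta('UNINITIALIZED', 'MONOMORPHIC'));  // 1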
@@ -375,7 +406,7 @@ void LoadIC::Clear(Address address, Code* target) {
void StoreIC::Clear(Address address, Code* target) {
  if (target->ic_state() == UNINITIALIZED) return;
  SetTargetAtAddress(address,
-     (target->extra_ic_state() == kStrictMode)
+     (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
          ? initialize_stub_strict()
          : initialize_stub());
}
@@ -384,7 +415,7 @@ void StoreIC::Clear(Address address, Code* target) {
void KeyedStoreIC::Clear(Address address, Code* target) {
  if (target->ic_state() == UNINITIALIZED) return;
  SetTargetAtAddress(address,
-     (target->extra_ic_state() == kStrictMode)
+     (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
          ? initialize_stub_strict()
          : initialize_stub());
}
@@ -996,19 +1027,22 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
Handle<Code> KeyedLoadIC::GetElementStubWithoutMapCheck(
    bool is_js_array,
-   ElementsKind elements_kind) {
+   ElementsKind elements_kind,
+   KeyedAccessGrowMode grow_mode) {
+ ASSERT(grow_mode == DO_NOT_ALLOW_JSARRAY_GROWTH);
  return KeyedLoadElementStub(elements_kind).GetCode();
}

Handle<Code> KeyedLoadIC::ComputePolymorphicStub(
    MapHandleList* receiver_maps,
-   StrictModeFlag strict_mode) {
+   StrictModeFlag strict_mode,
+   KeyedAccessGrowMode growth_mode) {
  CodeHandleList handler_ics(receiver_maps->length());
  for (int i = 0; i < receiver_maps->length(); ++i) {
    Handle<Map> receiver_map = receiver_maps->at(i);
    Handle<Code> cached_stub = ComputeMonomorphicStubWithoutMapCheck(
-       receiver_map, strict_mode);
+       receiver_map, strict_mode, growth_mode);
    handler_ics.Add(cached_stub);
  }
  KeyedLoadStubCompiler compiler(isolate());
@@ -1493,12 +1527,9 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
                                  StrictModeFlag strict_mode,
                                  Handle<Code> generic_stub) {
  State ic_state = target()->ic_state();
- if ((ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) &&
-     !IsTransitionStubKind(stub_kind)) {
-   return ComputeMonomorphicStub(
-       receiver, stub_kind, strict_mode, generic_stub);
- }
- ASSERT(target() != *generic_stub);
+ KeyedAccessGrowMode grow_mode = IsGrowStubKind(stub_kind)
+     ? ALLOW_JSARRAY_GROWTH
+     : DO_NOT_ALLOW_JSARRAY_GROWTH;

  // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
  // via megamorphic stubs, since they don't have a map in their relocation info
@@ -1508,15 +1539,39 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
    return generic_stub;
  }
- // Determine the list of receiver maps that this call site has seen,
- // adding the map that was just encountered.
+ bool monomorphic = false;
  MapHandleList target_receiver_maps;
- Handle<Map> receiver_map(receiver->map());
+ if (ic_state != UNINITIALIZED && ic_state != PREMONOMORPHIC) {
+   GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
+ }
+ if (!IsTransitionStubKind(stub_kind)) {
    if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
-     target_receiver_maps.Add(receiver_map);
+     monomorphic = true;
    } else {
-     GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
+     if (ic_state == MONOMORPHIC) {
+       // The first time a receiver is seen that is a transitioned version of
+       // the previous monomorphic receiver type, assume the new ElementsKind
+       // is the monomorphic type. This benefits global arrays that only
+       // transition once, and all call sites accessing them are faster if they
+       // remain monomorphic. If this optimistic assumption is not true, the IC
+       // will miss again and it will become polymorphic and support both the
+       // untransitioned and transitioned maps.
+       monomorphic = IsMoreGeneralElementsKindTransition(
+           target_receiver_maps.at(0)->elements_kind(),
+           receiver->GetElementsKind());
+     }
    }
+ }
+
+ if (monomorphic) {
+   return ComputeMonomorphicStub(
+       receiver, stub_kind, strict_mode, generic_stub);
+ }
+ ASSERT(target() != *generic_stub);
+
+ // Determine the list of receiver maps that this call site has seen,
+ // adding the map that was just encountered.
+ Handle<Map> receiver_map(receiver->map());
  bool map_added =
      AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
  if (IsTransitionStubKind(stub_kind)) {
@@ -1537,14 +1592,21 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
    return generic_stub;
  }

+ if ((Code::GetKeyedAccessGrowMode(target()->extra_ic_state()) ==
+      ALLOW_JSARRAY_GROWTH)) {
+   grow_mode = ALLOW_JSARRAY_GROWTH;
+ }
+
  Handle<PolymorphicCodeCache> cache =
      isolate()->factory()->polymorphic_code_cache();
- Code::Flags flags = Code::ComputeFlags(kind(), MEGAMORPHIC, strict_mode);
+ Code::ExtraICState extra_state = Code::ComputeExtraICState(grow_mode,
+                                                            strict_mode);
+ Code::Flags flags = Code::ComputeFlags(kind(), MEGAMORPHIC, extra_state);
  Handle<Object> probe = cache->Lookup(&target_receiver_maps, flags);
  if (probe->IsCode()) return Handle<Code>::cast(probe);

  Handle<Code> stub =
-     ComputePolymorphicStub(&target_receiver_maps, strict_mode);
+     ComputePolymorphicStub(&target_receiver_maps, strict_mode, grow_mode);
  PolymorphicCodeCache::Update(cache, &target_receiver_maps, flags, stub);
  return stub;
}
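The optimistic monomorphic rule in the comment above, as a hedged JavaScript illustration (not part of the diff):

  // Illustrative only: one call site, one array that transitions once.
  var points = [1, 2, 3];      // smi-only elements
  function set(i, v) { points[i] = v; }
  set(0, 42);                  // monomorphic smi store
  set(1, 0.5);                 // elements transition to double; under the
                               // optimistic assumption the IC stays
                               // monomorphic on the transitioned map
                               // instead of going polymorphic immediately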
@@ -1552,7 +1614,8 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
    Handle<Map> receiver_map,
-   StrictModeFlag strict_mode) {
+   StrictModeFlag strict_mode,
+   KeyedAccessGrowMode grow_mode) {
  if ((receiver_map->instance_type() & kNotStringTag) == 0) {
    ASSERT(!string_stub().is_null());
    return string_stub();
@@ -1564,7 +1627,8 @@ Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
        receiver_map->has_external_array_elements());
    bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
    return GetElementStubWithoutMapCheck(is_js_array,
-                                        receiver_map->elements_kind());
+                                        receiver_map->elements_kind(),
+                                        grow_mode);
  }
}
@@ -1591,9 +1655,12 @@ Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver,
  switch (stub_kind) {
    case KeyedIC::STORE_TRANSITION_SMI_TO_OBJECT:
    case KeyedIC::STORE_TRANSITION_DOUBLE_TO_OBJECT:
+   case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
+   case KeyedIC::STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
      return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
      break;
    case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE:
+   case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
      return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
      break;
    default:
@@ -1605,13 +1672,16 @@ Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver,
Handle<Code> KeyedStoreIC::GetElementStubWithoutMapCheck(
    bool is_js_array,
-   ElementsKind elements_kind) {
- return KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+   ElementsKind elements_kind,
+   KeyedAccessGrowMode grow_mode) {
+ return KeyedStoreElementStub(is_js_array, elements_kind, grow_mode).GetCode();
}

-Handle<Code> KeyedStoreIC::ComputePolymorphicStub(MapHandleList* receiver_maps,
-                                                  StrictModeFlag strict_mode) {
+Handle<Code> KeyedStoreIC::ComputePolymorphicStub(
+    MapHandleList* receiver_maps,
+    StrictModeFlag strict_mode,
+    KeyedAccessGrowMode grow_mode) {
  // Collect MONOMORPHIC stubs for all target_receiver_maps.
  CodeHandleList handler_ics(receiver_maps->length());
  MapHandleList transitioned_maps(receiver_maps->length());
@@ -1625,16 +1695,17 @@ Handle<Code> KeyedStoreIC::ComputePolymorphicStub(MapHandleList* receiver_maps,
          receiver_map->elements_kind(),  // original elements_kind
          transitioned_map->elements_kind(),
          receiver_map->instance_type() == JS_ARRAY_TYPE,  // is_js_array
-         strict_mode).GetCode();
+         strict_mode, grow_mode).GetCode();
    } else {
      cached_stub = ComputeMonomorphicStubWithoutMapCheck(receiver_map,
-                                                         strict_mode);
+                                                         strict_mode,
+                                                         grow_mode);
    }
    ASSERT(!cached_stub.is_null());
    handler_ics.Add(cached_stub);
    transitioned_maps.Add(transitioned_map);
  }
- KeyedStoreStubCompiler compiler(isolate(), strict_mode);
+ KeyedStoreStubCompiler compiler(isolate(), strict_mode, grow_mode);
  Handle<Code> code = compiler.CompileStorePolymorphic(
      receiver_maps, &handler_ics, &transitioned_maps);
  isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
@@ -1644,6 +1715,48 @@ Handle<Code> KeyedStoreIC::ComputePolymorphicStub(MapHandleList* receiver_maps,
}
KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver,
Handle<Object> key,
Handle<Object> value) {
ASSERT(key->IsSmi());
int index = Smi::cast(*key)->value();
bool allow_growth = receiver->IsJSArray() &&
JSArray::cast(*receiver)->length()->IsSmi() &&
index >= Smi::cast(JSArray::cast(*receiver)->length())->value();
if (allow_growth) {
// Handle growing array in stub if necessary.
if (receiver->HasFastSmiOnlyElements()) {
if (value->IsHeapNumber()) {
return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
}
if (value->IsHeapObject()) {
return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
}
} else if (receiver->HasFastDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
}
}
return STORE_AND_GROW_NO_TRANSITION;
} else {
// Handle only in-bounds elements accesses.
if (receiver->HasFastSmiOnlyElements()) {
if (value->IsHeapNumber()) {
return STORE_TRANSITION_SMI_TO_DOUBLE;
} else if (value->IsHeapObject()) {
return STORE_TRANSITION_SMI_TO_OBJECT;
}
} else if (receiver->HasFastDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
return STORE_TRANSITION_DOUBLE_TO_OBJECT;
}
}
return STORE_NO_TRANSITION;
}
}
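A rough JavaScript model of GetStubKind's decision tree (illustrative only; the elementsKind argument stands in for the receiver's hidden elements kind, and a "smi" is a small integer value):

  function stubKindFor(isArray, length, index, value, elementsKind) {
    var grow = isArray && index >= length;
    var suffix = '_NO_TRANSITION';
    if (elementsKind === 'smi-only') {
      if (typeof value === 'number' && (value | 0) !== value) {
        suffix = '_TRANSITION_SMI_TO_DOUBLE';
      } else if (typeof value !== 'number') {
        suffix = '_TRANSITION_SMI_TO_OBJECT';
      }
    } else if (elementsKind === 'double') {
      if (typeof value !== 'number') {
        suffix = '_TRANSITION_DOUBLE_TO_OBJECT';
      }
    }
    return (grow ? 'STORE_AND_GROW' : 'STORE') + suffix;
  }
  console.log(stubKindFor(true, 3, 3, 0.5, 'smi-only'));
  // -> STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE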
MaybeObject* KeyedStoreIC::Store(State state,
                                 StrictModeFlag strict_mode,
                                 Handle<Object> object,
@@ -1706,18 +1819,7 @@ MaybeObject* KeyedStoreIC::Store(State state,
    stub = non_strict_arguments_stub();
  } else if (!force_generic) {
    if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
-     StubKind stub_kind = STORE_NO_TRANSITION;
-     if (receiver->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) {
-       if (value->IsHeapNumber()) {
-         stub_kind = STORE_TRANSITION_SMI_TO_DOUBLE;
-       } else if (value->IsHeapObject()) {
-         stub_kind = STORE_TRANSITION_SMI_TO_OBJECT;
-       }
-     } else if (receiver->GetElementsKind() == FAST_DOUBLE_ELEMENTS) {
-       if (!value->IsSmi() && !value->IsHeapNumber()) {
-         stub_kind = STORE_TRANSITION_DOUBLE_TO_OBJECT;
-       }
-     }
+     StubKind stub_kind = GetStubKind(receiver, key, value);
      stub = ComputeStub(receiver, stub_kind, strict_mode, stub);
    }
  } else {
@@ -1900,7 +2002,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
  return ic.Store(state,
-                 static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
+                 Code::GetStrictMode(extra_ic_state),
                  args.at<Object>(0),
                  args.at<String>(1),
                  args.at<Object>(2));
@@ -1976,7 +2078,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
  return ic.Store(state,
-                 static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
+                 Code::GetStrictMode(extra_ic_state),
                  args.at<Object>(0),
                  args.at<Object>(1),
                  args.at<Object>(2),
@@ -1992,8 +2094,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
  Handle<Object> object = args.at<Object>(0);
  Handle<Object> key = args.at<Object>(1);
  Handle<Object> value = args.at<Object>(2);
- StrictModeFlag strict_mode =
-     static_cast<StrictModeFlag>(extra_ic_state & kStrictMode);
+ StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
  return Runtime::SetObjectProperty(isolate,
                                    object,
                                    key,
@@ -2010,7 +2111,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
  return ic.Store(state,
-                 static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
+                 Code::GetStrictMode(extra_ic_state),
                  args.at<Object>(0),
                  args.at<Object>(1),
                  args.at<Object>(2),

82
deps/v8/src/ic.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -165,7 +165,7 @@ class IC {
  // Access the target code for the given IC address.
  static inline Code* GetTargetAtAddress(Address address);
  static inline void SetTargetAtAddress(Address address, Code* target);
- static void PostPatching();
+ static void PostPatching(Address address, Code* target, Code* old_target);

 private:
  // Frame pointer for the frame that uses (calls) the IC.
@@ -377,14 +377,48 @@ class KeyedIC: public IC {
    STORE_NO_TRANSITION,
    STORE_TRANSITION_SMI_TO_OBJECT,
    STORE_TRANSITION_SMI_TO_DOUBLE,
-   STORE_TRANSITION_DOUBLE_TO_OBJECT
+   STORE_TRANSITION_DOUBLE_TO_OBJECT,
+   STORE_AND_GROW_NO_TRANSITION,
+   STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
+   STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
+   STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
  };
static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
STORE_NO_TRANSITION;
STATIC_ASSERT(kGrowICDelta ==
STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT -
STORE_TRANSITION_SMI_TO_OBJECT);
STATIC_ASSERT(kGrowICDelta ==
STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE -
STORE_TRANSITION_SMI_TO_DOUBLE);
STATIC_ASSERT(kGrowICDelta ==
STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT -
STORE_TRANSITION_DOUBLE_TO_OBJECT);
  explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
  virtual ~KeyedIC() {}
static inline KeyedAccessGrowMode GetGrowModeFromStubKind(
StubKind stub_kind) {
return (stub_kind >= STORE_AND_GROW_NO_TRANSITION)
? ALLOW_JSARRAY_GROWTH
: DO_NOT_ALLOW_JSARRAY_GROWTH;
}
static inline StubKind GetGrowStubKind(StubKind stub_kind) {
ASSERT(stub_kind != LOAD);
if (stub_kind < STORE_AND_GROW_NO_TRANSITION) {
stub_kind = static_cast<StubKind>(static_cast<int>(stub_kind) +
kGrowICDelta);
}
return stub_kind;
}
  virtual Handle<Code> GetElementStubWithoutMapCheck(
      bool is_js_array,
-     ElementsKind elements_kind) = 0;
+     ElementsKind elements_kind,
+     KeyedAccessGrowMode grow_mode) = 0;

 protected:
  virtual Handle<Code> string_stub() {
@@ -398,12 +432,15 @@ class KeyedIC: public IC {
      StrictModeFlag strict_mode,
      Handle<Code> default_stub);

- virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
-                                             StrictModeFlag strict_mode) = 0;
+ virtual Handle<Code> ComputePolymorphicStub(
+     MapHandleList* receiver_maps,
+     StrictModeFlag strict_mode,
+     KeyedAccessGrowMode grow_mode) = 0;

  Handle<Code> ComputeMonomorphicStubWithoutMapCheck(
      Handle<Map> receiver_map,
-     StrictModeFlag strict_mode);
+     StrictModeFlag strict_mode,
+     KeyedAccessGrowMode grow_mode);

 private:
  void GetReceiverMapsForStub(Handle<Code> stub, MapHandleList* result);
@@ -417,7 +454,12 @@ class KeyedIC: public IC {
      StubKind stub_kind);
  static bool IsTransitionStubKind(StubKind stub_kind) {
-   return stub_kind > STORE_NO_TRANSITION;
+   return stub_kind > STORE_NO_TRANSITION &&
+       stub_kind != STORE_AND_GROW_NO_TRANSITION;
+ }
+
+ static bool IsGrowStubKind(StubKind stub_kind) {
+   return stub_kind >= STORE_AND_GROW_NO_TRANSITION;
  }
};
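The kGrowICDelta STATIC_ASSERTs above pin down the trick that makes this cheap: every STORE_AND_GROW kind sits at a fixed offset from its non-growing counterpart, so upgrading a stub kind is a single addition. A hedged JavaScript model (kind names mirror the C++ enum):

  var kinds = [
    'STORE_NO_TRANSITION',
    'STORE_TRANSITION_SMI_TO_OBJECT',
    'STORE_TRANSITION_SMI_TO_DOUBLE',
    'STORE_TRANSITION_DOUBLE_TO_OBJECT',
    'STORE_AND_GROW_NO_TRANSITION',
    'STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT',
    'STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE',
    'STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT'
  ];
  var kGrowICDelta = kinds.indexOf('STORE_AND_GROW_NO_TRANSITION');  // 4
  function getGrowStubKind(kind) {
    var i = kinds.indexOf(kind);
    return i < kGrowICDelta ? kinds[i + kGrowICDelta] : kind;
  }
  console.log(getGrowStubKind('STORE_TRANSITION_SMI_TO_DOUBLE'));
  // -> STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE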
@@ -456,7 +498,8 @@ class KeyedLoadIC: public KeyedIC {
  virtual Handle<Code> GetElementStubWithoutMapCheck(
      bool is_js_array,
-     ElementsKind elements_kind);
+     ElementsKind elements_kind,
+     KeyedAccessGrowMode grow_mode);

  virtual bool IsGeneric() const {
    return target() == *generic_stub();
@@ -466,7 +509,8 @@ class KeyedLoadIC: public KeyedIC {
  virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }

  virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
-                                             StrictModeFlag strict_mode);
+                                             StrictModeFlag strict_mode,
+                                             KeyedAccessGrowMode grow_mode);

  virtual Handle<Code> string_stub() {
    return isolate()->builtins()->KeyedLoadIC_String();
@@ -540,8 +584,8 @@ class StoreIC: public IC {
  void set_target(Code* code) {
    // Strict mode must be preserved across IC patching.
-   ASSERT((code->extra_ic_state() & kStrictMode) ==
-          (target()->extra_ic_state() & kStrictMode));
+   ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
+          Code::GetStrictMode(target()->extra_ic_state()));
    IC::set_target(code);
  }
@@ -603,7 +647,8 @@ class KeyedStoreIC: public KeyedIC {
  virtual Handle<Code> GetElementStubWithoutMapCheck(
      bool is_js_array,
-     ElementsKind elements_kind);
+     ElementsKind elements_kind,
+     KeyedAccessGrowMode grow_mode);

  virtual bool IsGeneric() const {
    return target() == *generic_stub() ||
@@ -614,7 +659,8 @@ class KeyedStoreIC: public KeyedIC {
  virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }

  virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
-                                             StrictModeFlag strict_mode);
+                                             StrictModeFlag strict_mode,
+                                             KeyedAccessGrowMode grow_mode);

 private:
  // Update the inline cache.
@@ -627,8 +673,8 @@ class KeyedStoreIC: public KeyedIC {
  void set_target(Code* code) {
    // Strict mode must be preserved across IC patching.
-   ASSERT((code->extra_ic_state() & kStrictMode) ==
-          (target()->extra_ic_state() & kStrictMode));
+   ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
+          Code::GetStrictMode(target()->extra_ic_state()));
    IC::set_target(code);
  }
@@ -659,6 +705,10 @@ class KeyedStoreIC: public KeyedIC {
  static void Clear(Address address, Code* target);
StubKind GetStubKind(Handle<JSObject> receiver,
Handle<Object> key,
Handle<Object> value);
  friend class IC;
};

2
deps/v8/src/isolate.cc

@@ -775,11 +775,13 @@ void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
  HandleScope scope;
  Handle<JSObject> receiver_handle(receiver);
  Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
+ { VMState state(this, EXTERNAL);
    thread_local_top()->failed_access_check_callback_(
        v8::Utils::ToLocal(receiver_handle),
        type,
        v8::Utils::ToLocal(data));
+ }
}

2
deps/v8/src/isolate.h

@@ -333,8 +333,6 @@ class HashMap;
typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;

#define ISOLATE_INIT_LIST(V) \
- /* AssertNoZoneAllocation state. */ \
- V(bool, zone_allow_allocation, true) \
  /* SerializerDeserializer state. */ \
  V(int, serialize_partial_snapshot_cache_length, 0) \
  /* Assembler state. */ \

30
deps/v8/src/jsregexp.cc

@@ -3597,22 +3597,20 @@ void RegExpEngine::DotPrint(const char* label,
// -------------------------------------------------------------------
// Tree to graph conversion
static const int kSpaceRangeCount = 20; static const uc16 kSpaceRanges[] = { 0x0009, 0x000D, 0x0020, 0x0020, 0x00A0,
static const int kSpaceRangeAsciiCount = 4; 0x00A0, 0x1680, 0x1680, 0x180E, 0x180E, 0x2000, 0x200A, 0x2028, 0x2029,
static const uc16 kSpaceRanges[kSpaceRangeCount] = { 0x0009, 0x000D, 0x0020, 0x202F, 0x202F, 0x205F, 0x205F, 0x3000, 0x3000, 0xFEFF, 0xFEFF };
0x0020, 0x00A0, 0x00A0, 0x1680, 0x1680, 0x180E, 0x180E, 0x2000, 0x200A, static const int kSpaceRangeCount = ARRAY_SIZE(kSpaceRanges);
0x2028, 0x2029, 0x202F, 0x202F, 0x205F, 0x205F, 0x3000, 0x3000 };
static const uc16 kWordRanges[] = { '0', '9', 'A', 'Z', '_', '_', 'a', 'z' };
static const int kWordRangeCount = 8; static const int kWordRangeCount = ARRAY_SIZE(kWordRanges);
static const uc16 kWordRanges[kWordRangeCount] = { '0', '9', 'A', 'Z', '_',
'_', 'a', 'z' }; static const uc16 kDigitRanges[] = { '0', '9' };
static const int kDigitRangeCount = ARRAY_SIZE(kDigitRanges);
static const int kDigitRangeCount = 2;
static const uc16 kDigitRanges[kDigitRangeCount] = { '0', '9' }; static const uc16 kLineTerminatorRanges[] = { 0x000A, 0x000A, 0x000D, 0x000D,
0x2028, 0x2029 };
static const int kLineTerminatorRangeCount = 6; static const int kLineTerminatorRangeCount = ARRAY_SIZE(kLineTerminatorRanges);
static const uc16 kLineTerminatorRanges[kLineTerminatorRangeCount] = { 0x000A,
0x000A, 0x000D, 0x000D, 0x2028, 0x2029 };
RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler, RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) { RegExpNode* on_success) {
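The rewrite inverts the dependency: the array is now the source of truth and each `k*RangeCount` is derived with `ARRAY_SIZE`, so adding a range pair (here 0xFEFF, the BOM, to the space ranges) cannot leave a count stale. The idiom in a self-contained form (V8's actual ARRAY_SIZE in globals.h additionally traps pointer arguments):

#include <cstdio>

typedef unsigned short uc16;

// Simplified element-count macro; evaluated entirely at compile time.
#define ARRAY_SIZE(a) (static_cast<int>(sizeof(a) / sizeof(*(a))))

static const uc16 kDigitRanges[] = { '0', '9' };
static const int kDigitRangeCount = ARRAY_SIZE(kDigitRanges);

int main() {
  // Extending kDigitRanges updates the count automatically; the old
  // hand-written `kDigitRangeCount = 2` had to be edited in lockstep.
  std::printf("%d\n", kDigitRangeCount);  // prints 2
  return 0;
}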

119  deps/v8/src/lithium-allocator.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -110,9 +110,9 @@ bool UsePosition::RegisterIsBeneficial() const {
 }

-void UseInterval::SplitAt(LifetimePosition pos) {
+void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
   ASSERT(Contains(pos) && pos.Value() != start().Value());
-  UseInterval* after = new UseInterval(pos, end_);
+  UseInterval* after = new(zone) UseInterval(pos, end_);
   after->next_ = next_;
   next_ = after;
   end_ = pos;
@@ -149,7 +149,7 @@ bool LiveRange::HasOverlap(UseInterval* target) const {
 #endif

-LiveRange::LiveRange(int id)
+LiveRange::LiveRange(int id, Zone* zone)
     : id_(id),
       spilled_(false),
       is_double_(false),
@@ -161,24 +161,26 @@ LiveRange::LiveRange(int id)
       next_(NULL),
       current_interval_(NULL),
       last_processed_use_(NULL),
-      spill_operand_(new LOperand()),
+      spill_operand_(new(zone) LOperand()),
       spill_start_index_(kMaxInt) { }

-void LiveRange::set_assigned_register(int reg, RegisterKind register_kind) {
+void LiveRange::set_assigned_register(int reg,
+                                      RegisterKind register_kind,
+                                      Zone* zone) {
   ASSERT(!HasRegisterAssigned() && !IsSpilled());
   assigned_register_ = reg;
   is_double_ = (register_kind == DOUBLE_REGISTERS);
-  ConvertOperands();
+  ConvertOperands(zone);
 }

-void LiveRange::MakeSpilled() {
+void LiveRange::MakeSpilled(Zone* zone) {
   ASSERT(!IsSpilled());
   ASSERT(TopLevel()->HasAllocatedSpillOperand());
   spilled_ = true;
   assigned_register_ = kInvalidAssignment;
-  ConvertOperands();
+  ConvertOperands(zone);
 }
@@ -246,7 +248,7 @@ UsePosition* LiveRange::FirstPosWithHint() const {
 }

-LOperand* LiveRange::CreateAssignedOperand() {
+LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
   LOperand* op = NULL;
   if (HasRegisterAssigned()) {
     ASSERT(!IsSpilled());
@@ -260,7 +262,7 @@ LOperand* LiveRange::CreateAssignedOperand() {
     op = TopLevel()->GetSpillOperand();
     ASSERT(!op->IsUnallocated());
   } else {
-    LUnallocated* unalloc = new LUnallocated(LUnallocated::NONE);
+    LUnallocated* unalloc = new(zone) LUnallocated(LUnallocated::NONE);
     unalloc->set_virtual_register(id_);
     op = unalloc;
   }
@@ -292,7 +294,9 @@ void LiveRange::AdvanceLastProcessedMarker(
 }

-void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) {
+void LiveRange::SplitAt(LifetimePosition position,
+                        LiveRange* result,
+                        Zone* zone) {
   ASSERT(Start().Value() < position.Value());
   ASSERT(result->IsEmpty());
   // Find the last interval that ends before the position. If the
@@ -311,7 +315,7 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) {
   while (current != NULL) {
     if (current->Contains(position)) {
-      current->SplitAt(position);
+      current->SplitAt(position, zone);
       break;
     }
     UseInterval* next = current->next();
@@ -404,7 +408,9 @@ void LiveRange::ShortenTo(LifetimePosition start) {
 }

-void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end) {
+void LiveRange::EnsureInterval(LifetimePosition start,
+                               LifetimePosition end,
+                               Zone* zone) {
   LAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
                          id_,
                          start.Value(),
@@ -418,7 +424,7 @@ void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end) {
     first_interval_ = first_interval_->next();
   }

-  UseInterval* new_interval = new UseInterval(start, new_end);
+  UseInterval* new_interval = new(zone) UseInterval(start, new_end);
   new_interval->next_ = first_interval_;
   first_interval_ = new_interval;
   if (new_interval->next() == NULL) {
@@ -427,20 +433,22 @@ void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end) {
 }

-void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end) {
+void LiveRange::AddUseInterval(LifetimePosition start,
+                               LifetimePosition end,
+                               Zone* zone) {
   LAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n",
                          id_,
                          start.Value(),
                          end.Value());
   if (first_interval_ == NULL) {
-    UseInterval* interval = new UseInterval(start, end);
+    UseInterval* interval = new(zone) UseInterval(start, end);
     first_interval_ = interval;
     last_interval_ = interval;
   } else {
     if (end.Value() == first_interval_->start().Value()) {
       first_interval_->set_start(start);
     } else if (end.Value() < first_interval_->start().Value()) {
-      UseInterval* interval = new UseInterval(start, end);
+      UseInterval* interval = new(zone) UseInterval(start, end);
       interval->set_next(first_interval_);
       first_interval_ = interval;
     } else {
@@ -456,11 +464,12 @@ void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end) {

 UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
-                                       LOperand* operand) {
+                                       LOperand* operand,
+                                       Zone* zone) {
   LAllocator::TraceAlloc("Add to live range %d use position %d\n",
                          id_,
                          pos.Value());
-  UsePosition* use_pos = new UsePosition(pos, operand);
+  UsePosition* use_pos = new(zone) UsePosition(pos, operand);
   UsePosition* prev = NULL;
   UsePosition* current = first_pos_;
   while (current != NULL && current->pos().Value() < pos.Value()) {
@@ -480,8 +489,8 @@ UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
 }

-void LiveRange::ConvertOperands() {
-  LOperand* op = CreateAssignedOperand();
+void LiveRange::ConvertOperands(Zone* zone) {
+  LOperand* op = CreateAssignedOperand(zone);
   UsePosition* use_pos = first_pos();
   while (use_pos != NULL) {
     ASSERT(Start().Value() <= use_pos->pos().Value() &&
@@ -545,8 +554,8 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {

 LAllocator::LAllocator(int num_values, HGraph* graph)
-    : chunk_(NULL),
-      allocation_ok_(true),
+    : zone_(graph->zone()),
+      chunk_(NULL),
       live_in_sets_(graph->blocks()->length()),
       live_ranges_(num_values * 2),
       fixed_live_ranges_(NULL),
@@ -560,7 +569,8 @@ LAllocator::LAllocator(int num_values, HGraph* graph)
       mode_(GENERAL_REGISTERS),
       num_registers_(-1),
       graph_(graph),
-      has_osr_entry_(false) {}
+      has_osr_entry_(false),
+      allocation_ok_(true) { }

 void LAllocator::InitializeLivenessAnalysis() {
@@ -574,7 +584,7 @@ void LAllocator::InitializeLivenessAnalysis() {
 BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
   // Compute live out for the given block, except not including backward
   // successor edges.
-  BitVector* live_out = new BitVector(next_virtual_register_);
+  BitVector* live_out = new(zone_) BitVector(next_virtual_register_, zone_);

   // Process all successor blocks.
   for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
@@ -612,7 +622,7 @@ void LAllocator::AddInitialIntervals(HBasicBlock* block,
   while (!iterator.Done()) {
     int operand_index = iterator.Current();
     LiveRange* range = LiveRangeFor(operand_index);
-    range->AddUseInterval(start, end);
+    range->AddUseInterval(start, end, zone_);
     iterator.Advance();
   }
 }
@@ -654,9 +664,9 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
   ASSERT(index < Register::kNumAllocatableRegisters);
   LiveRange* result = fixed_live_ranges_[index];
   if (result == NULL) {
-    result = new LiveRange(FixedLiveRangeID(index));
+    result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
     ASSERT(result->IsFixed());
-    result->set_assigned_register(index, GENERAL_REGISTERS);
+    result->set_assigned_register(index, GENERAL_REGISTERS, zone_);
     fixed_live_ranges_[index] = result;
   }
   return result;
@@ -667,9 +677,9 @@ LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
   ASSERT(index < DoubleRegister::kNumAllocatableRegisters);
   LiveRange* result = fixed_double_live_ranges_[index];
   if (result == NULL) {
-    result = new LiveRange(FixedDoubleLiveRangeID(index));
+    result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
     ASSERT(result->IsFixed());
-    result->set_assigned_register(index, DOUBLE_REGISTERS);
+    result->set_assigned_register(index, DOUBLE_REGISTERS, zone_);
     fixed_double_live_ranges_[index] = result;
   }
   return result;
@@ -682,7 +692,7 @@ LiveRange* LAllocator::LiveRangeFor(int index) {
   }
   LiveRange* result = live_ranges_[index];
   if (result == NULL) {
-    result = new LiveRange(index);
+    result = new(zone_) LiveRange(index, zone_);
     live_ranges_[index] = result;
   }
   return result;
@@ -728,15 +738,15 @@ void LAllocator::Define(LifetimePosition position,
   if (range->IsEmpty() || range->Start().Value() > position.Value()) {
     // Can happen if there is a definition without use.
-    range->AddUseInterval(position, position.NextInstruction());
-    range->AddUsePosition(position.NextInstruction(), NULL);
+    range->AddUseInterval(position, position.NextInstruction(), zone_);
+    range->AddUsePosition(position.NextInstruction(), NULL, zone_);
   } else {
     range->ShortenTo(position);
   }

   if (operand->IsUnallocated()) {
     LUnallocated* unalloc_operand = LUnallocated::cast(operand);
-    range->AddUsePosition(position, unalloc_operand)->set_hint(hint);
+    range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
   }
 }
@@ -749,9 +759,9 @@ void LAllocator::Use(LifetimePosition block_start,
   if (range == NULL) return;
   if (operand->IsUnallocated()) {
     LUnallocated* unalloc_operand = LUnallocated::cast(operand);
-    range->AddUsePosition(position, unalloc_operand)->set_hint(hint);
+    range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
   }
-  range->AddUseInterval(block_start, position);
+  range->AddUseInterval(block_start, position, zone_);
 }
@@ -860,7 +870,8 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
       if (RequiredRegisterKind(input_copy->virtual_register()) ==
           DOUBLE_REGISTERS) {
         double_artificial_registers_.Add(
-            cur_input->virtual_register() - first_artificial_register_);
+            cur_input->virtual_register() - first_artificial_register_,
+            zone_);
       }

       AddConstraintsGapMove(gap_index, input_copy, cur_input);
@@ -964,7 +975,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
             output->index() != i) {
           LiveRange* range = FixedLiveRangeFor(i);
           range->AddUseInterval(curr_position,
-                                curr_position.InstructionEnd());
+                                curr_position.InstructionEnd(),
+                                zone_);
         }
       }
     }
@@ -975,7 +987,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
             output->index() != i) {
           LiveRange* range = FixedDoubleLiveRangeFor(i);
           range->AddUseInterval(curr_position,
-                                curr_position.InstructionEnd());
+                                curr_position.InstructionEnd(),
+                                zone_);
         }
       }
     }
@@ -1023,7 +1036,7 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
   const ZoneList<HPhi*>* phis = block->phis();
   for (int i = 0; i < phis->length(); ++i) {
     HPhi* phi = phis->at(i);
-    LUnallocated* phi_operand = new LUnallocated(LUnallocated::NONE);
+    LUnallocated* phi_operand = new(zone_) LUnallocated(LUnallocated::NONE);
     phi_operand->set_virtual_register(phi->id());
     for (int j = 0; j < phi->OperandCount(); ++j) {
       HValue* op = phi->OperandAt(j);
@@ -1033,7 +1046,7 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
         operand = chunk_->DefineConstantOperand(constant);
       } else {
         ASSERT(!op->EmitAtUses());
-        LUnallocated* unalloc = new LUnallocated(LUnallocated::ANY);
+        LUnallocated* unalloc = new(zone_) LUnallocated(LUnallocated::ANY);
         unalloc->set_virtual_register(op->id());
         operand = unalloc;
       }
@@ -1140,8 +1153,8 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
   if (cur_cover->IsSpilled()) return;
   ASSERT(pred_cover != NULL && cur_cover != NULL);
   if (pred_cover != cur_cover) {
-    LOperand* pred_op = pred_cover->CreateAssignedOperand();
-    LOperand* cur_op = cur_cover->CreateAssignedOperand();
+    LOperand* pred_op = pred_cover->CreateAssignedOperand(zone_);
+    LOperand* cur_op = cur_cover->CreateAssignedOperand(zone_);
     if (!pred_op->Equals(cur_op)) {
       LGap* gap = NULL;
       if (block->predecessors()->length() == 1) {
@@ -1213,8 +1226,8 @@ void LAllocator::ConnectRanges() {
         }
         if (should_insert) {
           LParallelMove* move = GetConnectingParallelMove(pos);
-          LOperand* prev_operand = first_range->CreateAssignedOperand();
-          LOperand* cur_operand = second_range->CreateAssignedOperand();
+          LOperand* prev_operand = first_range->CreateAssignedOperand(zone_);
+          LOperand* cur_operand = second_range->CreateAssignedOperand(zone_);
           move->AddMove(prev_operand, cur_operand);
         }
       }
@@ -1317,7 +1330,7 @@ void LAllocator::BuildLiveRanges() {
     while (!iterator.Done()) {
       int operand_index = iterator.Current();
      LiveRange* range = LiveRangeFor(operand_index);
-      range->EnsureInterval(start, end);
+      range->EnsureInterval(start, end, zone_);
       iterator.Advance();
     }
@@ -1438,7 +1451,7 @@ void LAllocator::PopulatePointerMaps() {
         TraceAlloc("Pointer in register for range %d (start at %d) "
                    "at safe point %d\n",
                    cur->id(), cur->Start().Value(), safe_point);
-        LOperand* operand = cur->CreateAssignedOperand();
+        LOperand* operand = cur->CreateAssignedOperand(zone_);
         ASSERT(!operand->IsStackSlot());
         map->RecordPointer(operand);
       }
@@ -1810,7 +1823,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
       TraceAlloc("Assigning preferred reg %s to live range %d\n",
                  RegisterName(register_index),
                  current->id());
-      current->set_assigned_register(register_index, mode_);
+      current->set_assigned_register(register_index, mode_, zone_);
       return true;
     }
   }
@@ -1846,7 +1859,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
   TraceAlloc("Assigning free reg %s to live range %d\n",
              RegisterName(reg),
             current->id());
-  current->set_assigned_register(reg, mode_);
+  current->set_assigned_register(reg, mode_, zone_);

   return true;
 }
@@ -1936,7 +1949,7 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
   TraceAlloc("Assigning blocked reg %s to live range %d\n",
              RegisterName(reg),
              current->id());
-  current->set_assigned_register(reg, mode_);
+  current->set_assigned_register(reg, mode_, zone_);

   // This register was not free. Thus we need to find and spill
   // parts of active and inactive live regions that use the same register
@@ -2003,7 +2016,7 @@ LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) {

   LiveRange* result = LiveRangeFor(GetVirtualRegister());
   if (!AllocationOk()) return NULL;
-  range->SplitAt(pos, result);
+  range->SplitAt(pos, result, zone_);
   return result;
 }
@@ -2102,7 +2115,7 @@ void LAllocator::Spill(LiveRange* range) {
     if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
     first->SetSpillOperand(op);
   }
-  range->MakeSpilled();
+  range->MakeSpilled(zone_);
 }
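The mechanical theme of this file's diff: every bare `new T(...)` becomes `new(zone) T(...)` and a `Zone*` is threaded through the call chain, so allocator scratch objects come out of a bump-pointer arena that is released in one step instead of the C++ heap. A toy sketch of the plumbing that makes the `new(zone)` spelling compile (this Zone is a stand-in, not V8's):

#include <cstddef>

// Toy bump-pointer arena; no per-object frees, no overflow handling.
class Zone {
 public:
  Zone() : used_(0) {}
  void* New(size_t size) {
    void* result = buffer_ + used_;
    used_ += (size + 7) & ~static_cast<size_t>(7);  // keep 8-byte alignment
    return result;
  }
 private:
  char buffer_[1 << 16];
  size_t used_;
};

// Routing operator new through the zone is what allows
// `new(zone) UseInterval(pos, end_)` at the call sites above.
class ZoneObject {
 public:
  void* operator new(size_t size, Zone* zone) { return zone->New(size); }
  void operator delete(void*, Zone*) {}  // only invoked if a constructor throws
};

class UseInterval : public ZoneObject {
 public:
  UseInterval(int start, int end) : start_(start), end_(end) {}
 private:
  int start_, end_;
};

int main() {
  Zone zone;
  UseInterval* interval = new(&zone) UseInterval(3, 9);
  (void)interval;  // reclaimed wholesale when `zone` dies; no delete calls
  return 0;
}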

46  deps/v8/src/lithium-allocator.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -216,7 +216,7 @@ class UseInterval: public ZoneObject {
   // Split this interval at the given position without effecting the
   // live range that owns it. The interval must contain the position.
-  void SplitAt(LifetimePosition pos);
+  void SplitAt(LifetimePosition pos, Zone* zone);

   // If this interval intersects with other return smallest position
   // that belongs to both of them.
@@ -277,7 +277,7 @@ class LiveRange: public ZoneObject {
  public:
   static const int kInvalidAssignment = 0x7fffffff;

-  explicit LiveRange(int id);
+  LiveRange(int id, Zone* zone);

   UseInterval* first_interval() const { return first_interval_; }
   UsePosition* first_pos() const { return first_pos_; }
@@ -288,11 +288,13 @@ class LiveRange: public ZoneObject {
   int id() const { return id_; }
   bool IsFixed() const { return id_ < 0; }
   bool IsEmpty() const { return first_interval() == NULL; }
-  LOperand* CreateAssignedOperand();
+  LOperand* CreateAssignedOperand(Zone* zone);
   int assigned_register() const { return assigned_register_; }
   int spill_start_index() const { return spill_start_index_; }
-  void set_assigned_register(int reg, RegisterKind register_kind);
-  void MakeSpilled();
+  void set_assigned_register(int reg,
+                             RegisterKind register_kind,
+                             Zone* zone);
+  void MakeSpilled(Zone* zone);

   // Returns use position in this live range that follows both start
   // and last processed use position.
@@ -316,7 +318,7 @@ class LiveRange: public ZoneObject {
   // the range.
   // All uses following the given position will be moved from this
   // live range to the result live range.
-  void SplitAt(LifetimePosition position, LiveRange* result);
+  void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);

   bool IsDouble() const { return is_double_; }
   bool HasRegisterAssigned() const {
@@ -355,9 +357,15 @@ class LiveRange: public ZoneObject {
   LifetimePosition FirstIntersection(LiveRange* other);

   // Add a new interval or a new use position to this live range.
-  void EnsureInterval(LifetimePosition start, LifetimePosition end);
-  void AddUseInterval(LifetimePosition start, LifetimePosition end);
-  UsePosition* AddUsePosition(LifetimePosition pos, LOperand* operand);
+  void EnsureInterval(LifetimePosition start,
+                      LifetimePosition end,
+                      Zone* zone);
+  void AddUseInterval(LifetimePosition start,
+                      LifetimePosition end,
+                      Zone* zone);
+  UsePosition* AddUsePosition(LifetimePosition pos,
+                              LOperand* operand,
+                              Zone* zone);

   // Shorten the most recently added interval by setting a new start.
   void ShortenTo(LifetimePosition start);
@@ -369,7 +377,7 @@ class LiveRange: public ZoneObject {
 #endif

  private:
-  void ConvertOperands();
+  void ConvertOperands(Zone* zone);
   UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
   void AdvanceLastProcessedMarker(UseInterval* to_start_of,
                                   LifetimePosition but_not_past) const;
@@ -400,8 +408,8 @@ class GrowableBitVector BASE_EMBEDDED {
     return bits_->Contains(value);
   }

-  void Add(int value) {
-    EnsureCapacity(value);
+  void Add(int value, Zone* zone) {
+    EnsureCapacity(value, zone);
     bits_->Add(value);
   }
@@ -412,11 +420,11 @@ class GrowableBitVector BASE_EMBEDDED {
     return bits_ != NULL && bits_->length() > value;
   }

-  void EnsureCapacity(int value) {
+  void EnsureCapacity(int value, Zone* zone) {
     if (InBitsRange(value)) return;
     int new_length = bits_ == NULL ? kInitialLength : bits_->length();
     while (new_length <= value) new_length *= 2;
-    BitVector* new_bits = new BitVector(new_length);
+    BitVector* new_bits = new(zone) BitVector(new_length, zone);
     if (bits_ != NULL) new_bits->CopyFrom(*bits_);
     bits_ = new_bits;
   }
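`EnsureCapacity` keeps the amortized cost of `Add` constant by doubling: it starts from the current length and doubles until the requested bit index fits, then copies the old bits once. The growth arithmetic in isolation:

#include <cstdio>

// Growth rule from GrowableBitVector::EnsureCapacity above.
static int GrownLength(int current_length, int value) {
  int new_length = current_length;
  while (new_length <= value) new_length *= 2;
  return new_length;
}

int main() {
  // A 16-bit vector asked to hold bit 70 doubles three times: 16->32->64->128.
  std::printf("%d\n", GrownLength(16, 70));  // prints 128
  return 0;
}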
@@ -587,10 +595,9 @@ class LAllocator BASE_EMBEDDED {
   inline LGap* GapAt(int index);

-  LChunk* chunk_;
-  // Indicates success or failure during register allocation.
-  bool allocation_ok_;
+  Zone* zone_;
+  LChunk* chunk_;

   // During liveness analysis keep a mapping from block id to live_in sets
   // for blocks already analyzed.
@@ -621,6 +628,9 @@ class LAllocator BASE_EMBEDDED {
   bool has_osr_entry_;

+  // Indicates success or failure during register allocation.
+  bool allocation_ok_;
+
   DISALLOW_COPY_AND_ASSIGN(LAllocator);
 };

5  deps/v8/src/lithium.h

@@ -453,11 +453,10 @@ class LEnvironment: public ZoneObject {
         parameter_count_(parameter_count),
         pc_offset_(-1),
         values_(value_count),
-        is_tagged_(value_count),
+        is_tagged_(value_count, closure->GetHeap()->isolate()->zone()),
         spilled_registers_(NULL),
         spilled_double_registers_(NULL),
-        outer_(outer) {
-  }
+        outer_(outer) { }

   Handle<JSFunction> closure() const { return closure_; }
   int arguments_stack_height() const { return arguments_stack_height_; }

7  deps/v8/src/macro-assembler.h

@@ -36,13 +36,6 @@ enum InvokeFlag {
 };

-// Types of uncatchable exceptions.
-enum UncatchableExceptionType {
-  OUT_OF_MEMORY,
-  TERMINATION
-};
-
 // Invalid depth in prototype chain.
 const int kInvalidProtoDepth = -1;

6  deps/v8/src/mark-compact.cc

@@ -1176,13 +1176,17 @@ class StaticMarkingVisitor : public StaticVisitorBase {
     Heap* heap = map->GetHeap();
     Code* code = reinterpret_cast<Code*>(object);
     if (FLAG_cleanup_code_caches_at_gc) {
-      TypeFeedbackCells* type_feedback_cells = code->type_feedback_cells();
+      Object* raw_info = code->type_feedback_info();
+      if (raw_info->IsTypeFeedbackInfo()) {
+        TypeFeedbackCells* type_feedback_cells =
+            TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
         for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
           ASSERT(type_feedback_cells->AstId(i)->IsSmi());
           JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
           cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
         }
+      }
     }
     code->CodeIterateBody<StaticMarkingVisitor>(heap);
   }
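The fix guards the cast: the `type_feedback_info` slot may now hold something other than a `TypeFeedbackInfo`, so the visitor checks the type tag before casting and touching the cells. The tag-checked downcast in a self-contained form (these toy types only mirror the shape of the check, not V8's object model):

#include <cstdio>

struct Object {
  enum Type { kOther, kTypeFeedbackInfo } type;
  bool IsTypeFeedbackInfo() const { return type == kTypeFeedbackInfo; }
};

struct TypeFeedbackInfo : Object {
  int cell_count;
  // Safe only because every call site checks the tag first.
  static TypeFeedbackInfo* cast(Object* obj) {
    return static_cast<TypeFeedbackInfo*>(obj);
  }
};

int main() {
  TypeFeedbackInfo info;
  info.type = Object::kTypeFeedbackInfo;
  info.cell_count = 4;
  Object* raw_info = &info;
  if (raw_info->IsTypeFeedbackInfo()) {  // the guard this patch adds
    std::printf("%d cells\n", TypeFeedbackInfo::cast(raw_info)->cell_count);
  }
  return 0;
}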

8  deps/v8/src/math.js

@@ -29,15 +29,15 @@
 // Keep reference to original values of some global properties. This
 // has the added benefit that the code in this file is isolated from
 // changes to these properties.
-const $floor = MathFloor;
-const $random = MathRandom;
-const $abs = MathAbs;
+var $floor = MathFloor;
+var $random = MathRandom;
+var $abs = MathAbs;

 // Instance class name can only be set on functions. That is the only
 // purpose for MathConstructor.
 function MathConstructor() {}
 %FunctionSetInstanceClassName(MathConstructor, 'Math');
-const $Math = new MathConstructor();
+var $Math = new MathConstructor();
 $Math.__proto__ = $Object.prototype;
 %SetProperty(global, "Math", $Math, DONT_ENUM);

27  deps/v8/src/messages.js

@@ -25,17 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 // -------------------------------------------------------------------
 //
 // If this object gets passed to an error constructor the error will
 // get an accessor for .message that constructs a descriptive error
 // message on access.
-const kAddMessageAccessorsMarker = { };
+var kAddMessageAccessorsMarker = { };

 // This will be lazily initialized when first needed (and forcibly
 // overwritten even though it's const).
-const kMessages = 0;
+var kMessages = 0;

 function FormatString(format, message) {
   var args = %MessageGetArguments(message);
@@ -603,7 +602,7 @@ function SourceLocation(script, position, line, column, start, end) {
   this.end = end;
 }

-const kLineLengthLimit = 78;
+var kLineLengthLimit = 78;

 /**
  * Restrict source location start and end positions to make the source slice
@@ -748,18 +747,18 @@ function DefineOneShotAccessor(obj, name, fun) {
   // can't rely on 'this' being the same as 'obj'.
   var hasBeenSet = false;
   var value;
-  function getter() {
+  var getter = function() {
     if (hasBeenSet) {
       return value;
     }
     hasBeenSet = true;
     value = fun(obj);
     return value;
-  }
+  };
-  function setter(v) {
+  var setter = function(v) {
     hasBeenSet = true;
     value = v;
-  }
+  };
   %DefineOrRedefineAccessorProperty(obj, name, GETTER, getter, DONT_ENUM);
   %DefineOrRedefineAccessorProperty(obj, name, SETTER, setter, DONT_ENUM);
 }
@@ -1090,7 +1089,7 @@ function captureStackTrace(obj, cons_opt) {

 function SetUpError() {
   // Define special error type constructors.
-  function DefineError(f) {
+  var DefineError = function(f) {
     // Store the error function in both the global object
     // and the runtime object. The function is fetched
     // from the runtime object when throwing errors from
@@ -1106,7 +1105,7 @@ function SetUpError() {
     // However, it can't be an instance of the Error object because
     // it hasn't been properly configured yet. Instead we create a
     // special not-a-true-error-but-close-enough object.
-    function ErrorPrototype() {}
+    var ErrorPrototype = function() {};
     %FunctionSetPrototype(ErrorPrototype, $Object.prototype);
     %FunctionSetInstanceClassName(ErrorPrototype, 'Error');
     %FunctionSetPrototype(f, new ErrorPrototype());
@@ -1148,7 +1147,7 @@ function SetUpError() {
       }
     });
     %SetNativeFlag(f);
-  }
+  };

   DefineError(function Error() { });
   DefineError(function TypeError() { });
@@ -1167,8 +1166,8 @@ $Error.captureStackTrace = captureStackTrace;

 // Global list of error objects visited during ErrorToString. This is
 // used to detect cycles in error toString formatting.
-const visited_errors = new InternalArray();
-const cyclic_error_marker = new $Object();
+var visited_errors = new InternalArray();
+var cyclic_error_marker = new $Object();

 function ErrorToStringDetectCycle(error) {
   if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
@@ -1213,4 +1212,4 @@ InstallFunctions($Error.prototype, DONT_ENUM, ['toString', ErrorToString]);

 // Boilerplate for exceptions for stack overflows. Used from
 // Isolate::StackOverflow().
-const kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
+var kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);

39  deps/v8/src/mips/builtins-mips.cc

@@ -321,7 +321,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
                             Label* call_generic_code) {
   Counters* counters = masm->isolate()->counters();
   Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
-      has_non_smi_element;
+      has_non_smi_element, finish, cant_transition_map, not_double;

   // Check for array construction with zero arguments or one.
   __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
@@ -417,14 +417,16 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ mov(t3, sp);
   __ bind(&loop);
   __ lw(a2, MemOperand(t3));
+  __ Addu(t3, t3, kPointerSize);
   if (FLAG_smi_only_arrays) {
     __ JumpIfNotSmi(a2, &has_non_smi_element);
   }
-  __ Addu(t3, t3, kPointerSize);
   __ Addu(t1, t1, -kPointerSize);
   __ sw(a2, MemOperand(t1));
   __ bind(&entry);
   __ Branch(&loop, lt, t0, Operand(t1));

+  __ bind(&finish);
   __ mov(sp, t3);

   // Remove caller arguments and receiver from the stack, setup return value and
@@ -437,8 +439,39 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ Ret();

   __ bind(&has_non_smi_element);
+  // Double values are handled by the runtime.
+  __ CheckMap(
+      a2, t5, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
+  __ bind(&cant_transition_map);
   __ UndoAllocationInNewSpace(a3, t0);
-  __ b(call_generic_code);
+  __ Branch(call_generic_code);
+
+  __ bind(&not_double);
+  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+  // a3: JSArray
+  __ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                         FAST_ELEMENTS,
+                                         a2,
+                                         t5,
+                                         &cant_transition_map);
+  __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ RecordWriteField(a3,
+                      HeapObject::kMapOffset,
+                      a2,
+                      t5,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  Label loop2;
+  __ bind(&loop2);
+  __ lw(a2, MemOperand(t3));
+  __ Addu(t3, t3, kPointerSize);
+  __ Subu(t1, t1, kPointerSize);
+  __ sw(a2, MemOperand(t1));
+  __ Branch(&loop2, lt, t0, Operand(t1));
+  __ Branch(&finish);
 }
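Instead of bailing to the generic builtin on any non-smi, the new path classifies the offending value: heap numbers still go to the runtime (they would need a double transition), while other heap objects just transition the array's map from FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS and keep copying. The decision logic restated in plain C++ (the helper is hypothetical; the enum names are V8's):

#include <cstdio>

enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };
enum ValueClass { SMI, HEAP_NUMBER, OTHER_HEAP_OBJECT };

// Mirrors the branch structure of ArrayNativeCode above: smis stay in the
// smi-only kind, heap numbers bail to the generic code, anything else
// transitions the map and continues in place.
static ElementsKind KindAfterStore(ElementsKind kind, ValueClass value,
                                   bool* bail_to_generic) {
  *bail_to_generic = false;
  if (kind != FAST_SMI_ONLY_ELEMENTS || value == SMI) return kind;
  if (value == HEAP_NUMBER) {  // the CheckMap(... &not_double ...) branch
    *bail_to_generic = true;
    return kind;
  }
  return FAST_ELEMENTS;  // LoadTransitionedArrayMapConditional succeeded
}

int main() {
  bool bail;
  ElementsKind k = KindAfterStore(FAST_SMI_ONLY_ELEMENTS, OTHER_HEAP_OBJECT, &bail);
  std::printf("kind=%d bail=%d\n", k, bail);  // kind=1 bail=0
  return 0;
}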

60  deps/v8/src/mips/code-stubs-mips.cc

@@ -3580,6 +3580,11 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
 }

+void InterruptStub::Generate(MacroAssembler* masm) {
+  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
 void MathPowStub::Generate(MacroAssembler* masm) {
   CpuFeatures::Scope fpu_scope(FPU);
   const Register base = a1;
@@ -3832,17 +3837,6 @@ void CEntryStub::GenerateAheadOfTime() {
 }

-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
-  __ Throw(v0);
-}
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
-                                          UncatchableExceptionType type) {
-  __ ThrowUncatchable(type, v0);
-}
-
 void CEntryStub::GenerateCore(MacroAssembler* masm,
                               Label* throw_normal_exception,
                               Label* throw_termination_exception,
@@ -4033,13 +4027,27 @@ void CEntryStub::Generate(MacroAssembler* masm) {
                true);

   __ bind(&throw_out_of_memory_exception);
-  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
+  // Set external caught exception to false.
+  Isolate* isolate = masm->isolate();
+  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
+                                    isolate);
+  __ li(a0, Operand(false, RelocInfo::NONE));
+  __ li(a2, Operand(external_caught));
+  __ sw(a0, MemOperand(a2));
+
+  // Set pending exception and v0 to out of memory exception.
+  Failure* out_of_memory = Failure::OutOfMemoryException();
+  __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                      isolate)));
+  __ sw(v0, MemOperand(a2));
+  // Fall through to the next label.

   __ bind(&throw_termination_exception);
-  GenerateThrowUncatchable(masm, TERMINATION);
+  __ ThrowUncatchable(v0);

   __ bind(&throw_normal_exception);
-  GenerateThrowTOS(masm);
+  __ Throw(v0);
 }
@@ -5133,10 +5141,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   Label termination_exception;
   __ Branch(&termination_exception, eq, v0, Operand(a0));

-  __ Throw(v0);  // Expects thrown value in v0.
+  __ Throw(v0);

   __ bind(&termination_exception);
-  __ ThrowUncatchable(TERMINATION, v0);  // Expects thrown value in v0.
+  __ ThrowUncatchable(v0);

   __ bind(&failure);
   // For failure and exception return null.
@@ -6058,25 +6066,23 @@ void SubStringStub::Generate(MacroAssembler* masm) {
   // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
   // safe in this case.
-  __ UntagAndJumpIfSmi(a2, a2, &runtime);
-  __ UntagAndJumpIfSmi(a3, a3, &runtime);
+  __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
+  __ UntagAndJumpIfNotSmi(a3, a3, &runtime);

   // Both a2 and a3 are untagged integers.
   __ Branch(&runtime, lt, a3, Operand(zero_reg));  // From < 0.

-  __ subu(a2, t5, a3);
-  __ Branch(&runtime, gt, a3, Operand(t5));  // Fail if from > to.
+  __ Branch(&runtime, gt, a3, Operand(a2));  // Fail if from > to.
+  __ Subu(a2, a2, a3);

   // Make sure first argument is a string.
   __ lw(v0, MemOperand(sp, kStringOffset));
-  __ Branch(&runtime, eq, v0, Operand(kSmiTagMask));
+  __ JumpIfSmi(v0, &runtime);
   __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
-  __ And(t4, v0, Operand(kIsNotStringMask));
+  __ And(t0, a1, Operand(kIsNotStringMask));

-  __ Branch(&runtime, ne, t4, Operand(zero_reg));
+  __ Branch(&runtime, ne, t0, Operand(zero_reg));

   // Short-cut for the case of trivial substring.
   Label return_v0;
@@ -7326,11 +7332,13 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   { a2, a1, a3, EMIT_REMEMBERED_SET },
   { a3, a1, a2, EMIT_REMEMBERED_SET },
   // KeyedStoreStubCompiler::GenerateStoreFastElement.
-  { t0, a2, a3, EMIT_REMEMBERED_SET },
+  { a3, a2, t0, EMIT_REMEMBERED_SET },
+  { a2, a3, t0, EMIT_REMEMBERED_SET },
   // ElementsTransitionGenerator::GenerateSmiOnlyToObject
   // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { a2, a3, t5, EMIT_REMEMBERED_SET },
+  { a2, a3, t5, OMIT_REMEMBERED_SET },
   // ElementsTransitionGenerator::GenerateDoubleToObject
   { t2, a2, a0, EMIT_REMEMBERED_SET },
   { a2, t2, t5, EMIT_REMEMBERED_SET },

56  deps/v8/src/mips/codegen-mips.cc

@@ -89,13 +89,18 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
   //  -- a3    : target map, scratch for subsequent call
   //  -- t0    : scratch (elements)
   // -----------------------------------
-  Label loop, entry, convert_hole, gc_required;
+  Label loop, entry, convert_hole, gc_required, only_change_map, done;
   bool fpu_supported = CpuFeatures::IsSupported(FPU);
-  __ push(ra);

   Register scratch = t6;

+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
   __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+  __ Branch(&only_change_map, eq, at, Operand(t0));
+
+  __ push(ra);
   __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
   // t0: source FixedArray
   // t1: number of elements (smi-tagged)
@@ -118,7 +123,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
                       t5,
                       kRAHasBeenSaved,
                       kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
+                      OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   // Replace receiver's backing store with newly created FixedDoubleArray.
   __ Addu(a3, t2, Operand(kHeapObjectTag));
@@ -149,6 +154,18 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
   __ Branch(&entry);

+  __ bind(&only_change_map);
+  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ RecordWriteField(a2,
+                      HeapObject::kMapOffset,
+                      a3,
+                      t5,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ Branch(&done);
+
   // Call into runtime if GC is required.
   __ bind(&gc_required);
   __ pop(ra);
@@ -201,6 +218,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
   if (!fpu_supported) __ Pop(a1, a0);
   __ pop(ra);
+  __ bind(&done);
 }
@@ -214,10 +232,16 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   //  -- a3    : target map, scratch for subsequent call
   //  -- t0    : scratch (elements)
   // -----------------------------------
-  Label entry, loop, convert_hole, gc_required;
-  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+  Label entry, loop, convert_hole, gc_required, only_change_map;

+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
   __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+  __ Branch(&only_change_map, eq, at, Operand(t0));
+
+  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
   __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
   // t0: source FixedArray
   // t1: number of elements (smi-tagged)
@@ -289,16 +313,6 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   __ Branch(&loop, lt, a3, Operand(t1));

   __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
-  // Update receiver's map.
-  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
-                      HeapObject::kMapOffset,
-                      a3,
-                      t5,
-                      kRAHasBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
   // Replace receiver's backing store with newly created and filled FixedArray.
   __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
   __ RecordWriteField(a2,
@@ -310,6 +324,18 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
                       EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   __ pop(ra);
+
+  __ bind(&only_change_map);
+  // Update receiver's map.
+  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ RecordWriteField(a2,
+                      HeapObject::kMapOffset,
+                      a3,
+                      t5,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
 }

50  deps/v8/src/mips/deoptimizer-mips.cc

@@ -36,9 +36,6 @@ namespace v8 {
 namespace internal {

-const int Deoptimizer::table_entry_size_ = 32;
-
 int Deoptimizer::patch_size() {
   const int kCallInstructionSizeInWords = 4;
   return kCallInstructionSizeInWords * Assembler::kInstrSize;
@@ -839,32 +836,55 @@ void Deoptimizer::EntryGenerator::Generate() {
 }

+// Maximum size of a table entry generated below.
+const int Deoptimizer::table_entry_size_ = 12 * Assembler::kInstrSize;

 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());

   // Create a sequence of deoptimization entries. Note that any
   // registers may be still live.
-  Label done;
+  Label table_start;
+  __ bind(&table_start);
   for (int i = 0; i < count(); i++) {
-    int start = masm()->pc_offset();
-    USE(start);
+    Label start;
+    __ bind(&start);
     if (type() != EAGER) {
       // Emulate ia32 like call by pushing return address to stack.
-      __ push(ra);
-    }
-    __ li(at, Operand(i));
-    __ push(at);
-    __ Branch(&done);
+      __ addiu(sp, sp, -3 * kPointerSize);
+      __ sw(ra, MemOperand(sp, 2 * kPointerSize));
+    } else {
+      __ addiu(sp, sp, -2 * kPointerSize);
+    }
+    // Using ori makes sure only one instruction is generated. This will work
+    // as long as the number of deopt entries is below 2^16.
+    __ ori(at, zero_reg, i);
+    __ sw(at, MemOperand(sp, kPointerSize));
+    __ sw(ra, MemOperand(sp, 0));
+    // This branch instruction only jumps over one instruction, and that is
+    // executed in the delay slot. The result is that execution is linear but
+    // the ra register is updated.
+    __ bal(1);
+    // Jump over the remaining deopt entries (including this one).
+    // Only include the remaining part of the current entry in the calculation.
+    const int remaining_entries = (count() - i) * table_entry_size_;
+    const int cur_size = masm()->SizeOfCodeGeneratedSince(&start);
+    // ra points to the instruction after the delay slot. Adjust by 4.
+    __ Addu(at, ra, remaining_entries - cur_size - Assembler::kInstrSize);
+    __ lw(ra, MemOperand(sp, 0));
+    __ jr(at);  // Expose delay slot.
+    __ addiu(sp, sp, kPointerSize);  // In delay slot.

     // Pad the rest of the code.
-    while (table_entry_size_ > (masm()->pc_offset() - start)) {
+    while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) {
       __ nop();
     }
-    ASSERT_EQ(table_entry_size_, masm()->pc_offset() - start);
+    ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
   }
-  __ bind(&done);
+  ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
+            count() * table_entry_size_);
 }

 #undef __
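The table only works because every entry occupies exactly `table_entry_size_` bytes (now 12 MIPS instructions of 4 bytes each, replacing the old hard-coded 32): the deoptimizer finds entry i by multiplication, and the nop loop pads any short entry up to the fixed size. The sizing arithmetic on its own:

#include <cassert>
#include <cstdio>

const int kInstrSize = 4;                     // MIPS instructions are 4 bytes
const int kTableEntrySize = 12 * kInstrSize;  // fixed size per deopt entry

// Entry i starts at a fixed offset, so no scanning is needed:
static int EntryOffset(int i) { return i * kTableEntrySize; }

// Number of pad nops for an entry that emitted fewer bytes than the slot.
static int PadNops(int bytes_emitted) {
  assert(bytes_emitted <= kTableEntrySize);
  return (kTableEntrySize - bytes_emitted) / kInstrSize;
}

int main() {
  std::printf("entry 7 at offset %d\n", EntryOffset(7));         // 336
  std::printf("%d nops of padding\n", PadNops(9 * kInstrSize));  // 3
  return 0;
}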

50  deps/v8/src/mips/full-codegen-mips.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -133,10 +133,8 @@ class JumpPatchSite BASE_EMBEDDED {
 //
 // The function builds a JS frame. Please see JavaScriptFrameConstants in
 // frames-mips.h for its layout.
-void FullCodeGenerator::Generate(CompilationInfo* info) {
-  ASSERT(info_ == NULL);
-  info_ = info;
-  scope_ = info->scope();
+void FullCodeGenerator::Generate() {
+  CompilationInfo* info = info_;
   handler_table_ =
       isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
   SetFunctionPosition(function());
@@ -149,6 +147,27 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
   }
 #endif

+  // We can optionally optimize based on counters rather than statistical
+  // sampling.
+  if (info->ShouldSelfOptimize()) {
+    if (FLAG_trace_opt_verbose) {
+      PrintF("[adding self-optimization header to %s]\n",
+             *info->function()->debug_name()->ToCString());
+    }
+    MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
+        Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
+    JSGlobalPropertyCell* cell;
+    if (maybe_cell->To(&cell)) {
+      __ li(a2, Handle<JSGlobalPropertyCell>(cell));
+      __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+      __ Subu(a3, a3, Operand(Smi::FromInt(1)));
+      __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+      Handle<Code> compile_stub(
+          isolate()->builtins()->builtin(Builtins::kLazyRecompile));
+      __ Jump(compile_stub, RelocInfo::CODE_TARGET, eq, a3, Operand(zero_reg));
+    }
+  }
+
   // Strict mode functions and builtins need to replace the receiver
   // with undefined when called as functions (without an explicit
   // receiver object). t1 is zero for method calls and non-zero for
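The added prologue implements counter-based self-optimization: a per-function `JSGlobalPropertyCell` starts at `kCallsUntilPrimitiveOpt`, is decremented on every entry, and when it reaches zero the function jumps into the lazy-recompile stub. The same control flow in plain C++ (names hypothetical):

#include <cstdio>

// Stand-in for the JSGlobalPropertyCell holding the countdown.
struct OptimizationCell { int calls_until_opt; };

// Decrement on entry; at zero, the emitted code jumps to kLazyRecompile.
static bool ShouldRecompileOnEntry(OptimizationCell* cell) {
  cell->calls_until_opt -= 1;         // __ Subu(a3, a3, Operand(Smi::FromInt(1)))
  return cell->calls_until_opt == 0;  // conditional jump to the compile stub
}

int main() {
  OptimizationCell cell = { 3 };  // stands in for kCallsUntilPrimitiveOpt
  for (int call = 1; call <= 4; ++call) {
    if (ShouldRecompileOnEntry(&cell)) {
      std::printf("recompile triggered on call %d\n", call);  // fires on call 3
    }
  }
  return 0;
}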
@ -274,11 +293,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// For named function expressions, declare the function name as a // For named function expressions, declare the function name as a
// constant. // constant.
if (scope()->is_function_scope() && scope()->function() != NULL) { if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
VariableProxy* proxy = scope()->function(); VariableProxy* proxy = scope()->function();
ASSERT(proxy->var()->mode() == CONST || ASSERT(proxy->var()->mode() == CONST ||
proxy->var()->mode() == CONST_HARMONY); proxy->var()->mode() == CONST_HARMONY);
EmitDeclaration(proxy, proxy->var()->mode(), NULL, &ignored); ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
EmitDeclaration(proxy, proxy->var()->mode(), NULL);
} }
VisitDeclarations(scope()->declarations()); VisitDeclarations(scope()->declarations());
} }
@ -315,7 +334,8 @@ void FullCodeGenerator::ClearAccumulator() {
} }
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) { void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
// The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
// to make sure it is constant. Branch may emit a skip-or-jump sequence // to make sure it is constant. Branch may emit a skip-or-jump sequence
// instead of the normal Branch. It seems that the "skip" part of that // instead of the normal Branch. It seems that the "skip" part of that
@@ -716,8 +736,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
 void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
                                         VariableMode mode,
-                                        FunctionLiteral* function,
-                                        int* global_count) {
+                                        FunctionLiteral* function) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.

@@ -726,7 +745,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++(*global_count);
+      ++global_count_;
       break;

     case Variable::PARAMETER:

@@ -814,9 +833,6 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
 }

-void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
-
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
@@ -1098,7 +1114,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   __ Addu(a0, a0, Operand(Smi::FromInt(1)));
   __ push(a0);

-  EmitStackCheck(stmt);
+  EmitStackCheck(stmt, &loop);
   __ Branch(&loop);

   // Remove the pointers stored on the stack.
@@ -1516,7 +1532,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
                    Smi::FromInt(0)));
         __ push(a1);
         VisitForStackValue(value);
-        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        __ li(a0, Operand(Smi::FromInt(NONE)));
+        __ push(a0);
+        __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
         break;
       }
     }
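The last hunk replaces Runtime::kDefineAccessor with Runtime::kDefineOrRedefineAccessorProperty and passes an explicit attributes argument (NONE). The source pattern this code path serves is an object literal containing accessor properties, roughly:

    var o = {
      get answer()  { return 42; },   // getter in a literal
      set answer(v) { this.v = v; }   // setter in a literal
    };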

80
deps/v8/src/mips/lithium-codegen-mips.cc

@@ -2764,6 +2764,15 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
 }

+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
+  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
+  // The context is the first argument.
+  __ Push(cp, scratch0(), scratch1());
+  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());

@@ -4272,26 +4281,35 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
   ASSERT(!source.is(a2));
   ASSERT(!result.is(a2));

+  // Only elements backing stores for non-COW arrays need to be copied.
+  Handle<FixedArrayBase> elements(object->elements());
+  bool has_elements = elements->length() > 0 &&
+      elements->map() != isolate()->heap()->fixed_cow_array_map();
+
   // Increase the offset so that subsequent objects end up right after
-  // this one.
-  int current_offset = *offset;
-  int size = object->map()->instance_size();
-  *offset += size;
+  // this object and its backing store.
+  int object_offset = *offset;
+  int object_size = object->map()->instance_size();
+  int elements_offset = *offset + object_size;
+  int elements_size = has_elements ? elements->Size() : 0;
+  *offset += object_size + elements_size;

   // Copy object header.
   ASSERT(object->properties()->length() == 0);
-  ASSERT(object->elements()->length() == 0 ||
-         object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
   int inobject_properties = object->map()->inobject_properties();
-  int header_size = size - inobject_properties * kPointerSize;
+  int header_size = object_size - inobject_properties * kPointerSize;
   for (int i = 0; i < header_size; i += kPointerSize) {
-    __ lw(a2, FieldMemOperand(source, i));
-    __ sw(a2, FieldMemOperand(result, current_offset + i));
+    if (has_elements && i == JSObject::kElementsOffset) {
+      __ Addu(a2, result, Operand(elements_offset));
+    } else {
+      __ lw(a2, FieldMemOperand(source, i));
+    }
+    __ sw(a2, FieldMemOperand(result, object_offset + i));
   }

   // Copy in-object properties.
   for (int i = 0; i < inobject_properties; i++) {
-    int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
     Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
     if (value->IsJSObject()) {
       Handle<JSObject> value_object = Handle<JSObject>::cast(value);

@@ -4307,10 +4325,42 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
       __ sw(a2, FieldMemOperand(result, total_offset));
     }
   }
+
+  // Copy elements backing store header.
+  ASSERT(!has_elements || elements->IsFixedArray());
+  if (has_elements) {
+    __ LoadHeapObject(source, elements);
+    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
+      __ lw(a2, FieldMemOperand(source, i));
+      __ sw(a2, FieldMemOperand(result, elements_offset + i));
+    }
+  }
+
+  // Copy elements backing store content.
+  ASSERT(!has_elements || elements->IsFixedArray());
+  int elements_length = has_elements ? elements->length() : 0;
+  for (int i = 0; i < elements_length; i++) {
+    int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
+    Handle<Object> value = JSObject::GetElement(object, i);
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      __ Addu(a2, result, Operand(*offset));
+      __ sw(a2, FieldMemOperand(result, total_offset));
+      __ LoadHeapObject(source, value_object);
+      EmitDeepCopy(value_object, result, source, offset);
+    } else if (value->IsHeapObject()) {
+      __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
+      __ sw(a2, FieldMemOperand(result, total_offset));
+    } else {
+      __ li(a2, Operand(value));
+      __ sw(a2, FieldMemOperand(result, total_offset));
+    }
+  }
 }

-void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
+void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
   int size = instr->hydrogen()->total_size();

   // Allocate all objects that are part of the literal in one big

@@ -4332,14 +4382,14 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
 }

-void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
   ASSERT(ToRegister(instr->result()).is(v0));
+  Handle<FixedArray> literals(instr->environment()->closure()->literals());
   Handle<FixedArray> constant_properties =
       instr->hydrogen()->constant_properties();

-  __ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
+  // Set up the parameters to the stub/runtime call.
+  __ LoadHeapObject(t0, literals);
   __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
   __ li(a2, Operand(constant_properties));
   int flags = instr->hydrogen()->fast_elements()

@@ -4348,7 +4398,7 @@ void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
   __ li(a1, Operand(Smi::FromInt(flags)));
   __ Push(t0, a3, a2, a1);

-  // Pick the right runtime function to call.
+  // Pick the right runtime function or stub to call.
   int properties_count = constant_properties->length() / 2;
   if (instr->hydrogen()->depth() > 1) {
     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
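EmitDeepCopy now lays a literal's elements backing store out directly behind the object and skips copy-on-write arrays, so nested literals with array parts can stay on the fast path. A rough JavaScript picture of what does and does not get copied:

    // The boilerplate's elements backing store is copied inline, including
    // element values that are themselves nested literals:
    var shape = { label: "p", points: [{x: 1}, {x: 2}] };
    // An all-constant array literal gets a copy-on-write backing store and
    // is deliberately skipped by the copy:
    var small = [1, 2, 3];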

18
deps/v8/src/mips/lithium-mips.cc

@@ -1124,6 +1124,11 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
 }

+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  return MarkAsCall(new LDeclareGlobals, instr);
+}
+
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   return DefineAsRegister(new LGlobalObject(context));

@@ -2093,19 +2098,18 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
 }

+LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LFastLiteral, v0), instr);
+}
+
 LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
   return MarkAsCall(DefineFixed(new LArrayLiteral, v0), instr);
 }

-LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
-  return MarkAsCall(DefineFixed(new LObjectLiteralFast, v0), instr);
-}
-
-LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
-    HObjectLiteralGeneric* instr) {
-  return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, v0), instr);
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteral, v0), instr);
 }
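DoDeclareGlobals gives the optimizing compiler its own instruction for Runtime::kDeclareGlobals, the runtime entry that installs top-level declarations on the global object. A sketch of the kind of code it covers:

    // Top-level declarations like these are what kDeclareGlobals installs
    // when the enclosing script is entered:
    var counter = 0;
    function tick() { return ++counter; }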

30
deps/v8/src/mips/lithium-mips.h

@@ -87,11 +87,13 @@ class LCodeGen;
   V(ConstantI) \
   V(ConstantT) \
   V(Context) \
+  V(DeclareGlobals) \
   V(DeleteProperty) \
   V(Deoptimize) \
   V(DivI) \
   V(DoubleToI) \
   V(ElementsKind) \
+  V(FastLiteral) \
   V(FixedArrayBaseLength) \
   V(FunctionLiteral) \
   V(GetCachedArrayIndex) \

@@ -134,8 +136,7 @@ class LCodeGen;
   V(NumberTagD) \
   V(NumberTagI) \
   V(NumberUntagD) \
-  V(ObjectLiteralFast) \
-  V(ObjectLiteralGeneric) \
+  V(ObjectLiteral) \
   V(OsrEntry) \
   V(OuterContext) \
   V(Parameter) \

@@ -1346,6 +1347,13 @@ class LOuterContext: public LTemplateInstruction<1, 1, 0> {
 };

+class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
 class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LGlobalObject(LOperand* context) {

@@ -1909,24 +1917,24 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
 };

-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
+class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
-  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+  DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
 };

-class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
+class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
-  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
 };

-class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
+class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
-  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
 };

20
deps/v8/src/mips/macro-assembler-mips.cc

@@ -2679,8 +2679,7 @@ void MacroAssembler::Throw(Register value) {
 }

-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
-                                      Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);

@@ -2690,24 +2689,9 @@ void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);

   // The exception is expected in v0.
-  if (type == OUT_OF_MEMORY) {
-    // Set external caught exception to false.
-    ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
-                                      isolate());
-    li(a0, Operand(false, RelocInfo::NONE));
-    li(a2, Operand(external_caught));
-    sw(a0, MemOperand(a2));
-
-    // Set pending exception and v0 to out of memory exception.
-    Failure* out_of_memory = Failure::OutOfMemoryException();
-    li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
-    li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                     isolate())));
-    sw(v0, MemOperand(a2));
-  } else if (!value.is(v0)) {
+  if (!value.is(v0)) {
     mov(v0, value);
   }

   // Drop the stack pointer to the top of the top stack handler.
   li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   lw(sp, MemOperand(a3));

4
deps/v8/src/mips/macro-assembler-mips.h

@@ -871,12 +871,12 @@ class MacroAssembler: public Assembler {
   // Must preserve the result register.
   void PopTryHandler();

-  // Passes thrown value (in v0) to the handler of top of the try handler chain.
+  // Passes thrown value to the handler of top of the try handler chain.
   void Throw(Register value);

   // Propagates an uncatchable exception to the top of the current JS stack's
   // handler chain.
-  void ThrowUncatchable(UncatchableExceptionType type, Register value);
+  void ThrowUncatchable(Register value);

   // Copies a fixed number of fields of heap objects from src to dst.
   void CopyFields(Register dst, Register src, RegList temps, int field_count);

4
deps/v8/src/mips/simulator-mips.cc

@@ -1369,9 +1369,9 @@ void Simulator::WriteB(int32_t addr, int8_t value) {
 // Returns the limit of the stack area to enable checking for stack overflows.
 uintptr_t Simulator::StackLimit() const {
-  // Leave a safety margin of 512 bytes to prevent overrunning the stack when
+  // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
   // pushing values.
-  return reinterpret_cast<uintptr_t>(stack_) + 512;
+  return reinterpret_cast<uintptr_t>(stack_) + 1024;
 }

188
deps/v8/src/mips/stub-cache-mips.cc

@@ -3058,7 +3058,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
   ElementsKind elements_kind = receiver_map->elements_kind();
   bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
   Handle<Code> stub =
-      KeyedStoreElementStub(is_js_array, elements_kind).GetCode();
+      KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();

   __ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK);

@@ -4168,7 +4168,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
 void KeyedStoreStubCompiler::GenerateStoreFastElement(
     MacroAssembler* masm,
     bool is_js_array,
-    ElementsKind elements_kind) {
+    ElementsKind elements_kind,
+    KeyedAccessGrowMode grow_mode) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key

@@ -4177,15 +4178,17 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   //  -- a3    : scratch
   //  -- a4    : scratch (elements)
   // -----------------------------------
-  Label miss_force_generic, transition_elements_kind;
+  Label miss_force_generic, transition_elements_kind, grow, slow;
+  Label finish_store, check_capacity;

   Register value_reg = a0;
   Register key_reg = a1;
   Register receiver_reg = a2;
-  Register scratch = a3;
-  Register elements_reg = t0;
-  Register scratch2 = t1;
-  Register scratch3 = t2;
+  Register scratch = t0;
+  Register elements_reg = a3;
+  Register length_reg = t1;
+  Register scratch2 = t2;
+  Register scratch3 = t3;

   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.

@@ -4193,26 +4196,35 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   // Check that the key is a smi.
   __ JumpIfNotSmi(key_reg, &miss_force_generic);

-  // Get the elements array and make sure it is a fast element array, not 'cow'.
-  __ lw(elements_reg,
-        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-  __ CheckMap(elements_reg,
-              scratch,
-              Heap::kFixedArrayMapRootIndex,
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ JumpIfNotSmi(value_reg, &transition_elements_kind);
+  }

   // Check that the key is within bounds.
+  __ lw(elements_reg,
+        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
   if (is_js_array) {
     __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
   } else {
     __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
   }
   // Compare smis.
-  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    __ Branch(&grow, hs, key_reg, Operand(scratch));
+  } else {
+    __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+  }
+
+  // Make sure elements is a fast element array, not 'cow'.
+  __ CheckMap(elements_reg,
+              scratch,
+              Heap::kFixedArrayMapRootIndex,
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  __ bind(&finish_store);

   if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
-    __ JumpIfNotSmi(value_reg, &transition_elements_kind);
     __ Addu(scratch,
             elements_reg,
             Operand(FixedArray::kHeaderSize - kHeapObjectTag));

@@ -4249,12 +4261,79 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   __ bind(&transition_elements_kind);
   Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
   __ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    // Grow the array by a single element if possible.
+    __ bind(&grow);
+
+    // Make sure the array is only growing by a single element, anything else
+    // must be handled by the runtime.
+    __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch));
+
+    // Check for the empty array, and preallocate a small backing store if
+    // possible.
+    __ lw(length_reg,
+          FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ lw(elements_reg,
+          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+    __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+    __ Branch(&check_capacity, ne, elements_reg, Operand(at));
+
+    int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+    __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
+                          TAG_OBJECT);
+
+    __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+    __ sw(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
+    __ li(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+    __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+    for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
+      __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
+    }
+
+    // Store the element at index zero.
+    __ sw(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
+
+    // Install the new backing store in the JSArray.
+    __ sw(elements_reg,
+          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+    __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
+                        scratch, kRAHasNotBeenSaved, kDontSaveFPRegs,
+                        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+    // Increment the length of the array.
+    __ li(length_reg, Operand(Smi::FromInt(1)));
+    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ Ret();
+
+    __ bind(&check_capacity);
+    // Check for cow elements, in general they are not handled by this stub
+    __ CheckMap(elements_reg,
+                scratch,
+                Heap::kFixedCOWArrayMapRootIndex,
+                &miss_force_generic,
+                DONT_DO_SMI_CHECK);
+
+    __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+    __ Branch(&slow, hs, length_reg, Operand(scratch));
+
+    // Grow the array and finish the store.
+    __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
+    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ jmp(&finish_store);
+
+    __ bind(&slow);
+    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+  }
 }

 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     MacroAssembler* masm,
-    bool is_js_array) {
+    bool is_js_array,
+    KeyedAccessGrowMode grow_mode) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key

@@ -4266,7 +4345,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   //  -- t2    : scratch (exponent_reg)
   //  -- t3    : scratch4
   // -----------------------------------
-  Label miss_force_generic, transition_elements_kind;
+  Label miss_force_generic, transition_elements_kind, grow, slow;
+  Label finish_store, check_capacity;

   Register value_reg = a0;
   Register key_reg = a1;

@@ -4276,6 +4356,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   Register scratch2 = t1;
   Register scratch3 = t2;
   Register scratch4 = t3;
+  Register length_reg = t3;

   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.

@@ -4293,7 +4374,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   }

   // Compare smis, unsigned compare catches both negative and out-of-bound
   // indexes.
-  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
+  if (grow_mode == ALLOW_JSARRAY_GROWTH) {
+    __ Branch(&grow, hs, key_reg, Operand(scratch1));
+  } else {
+    __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
+  }
+
+  __ bind(&finish_store);

   __ StoreNumberToDoubleElements(value_reg,
                                  key_reg,

@@ -4317,6 +4404,71 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   __ bind(&transition_elements_kind);
   Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
   __ Jump(ic_miss, RelocInfo::CODE_TARGET);
+
+  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
+    // Grow the array by a single element if possible.
+    __ bind(&grow);
+
+    // Make sure the array is only growing by a single element, anything else
+    // must be handled by the runtime.
+    __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch1));
+
+    // Transition on values that can't be stored in a FixedDoubleArray.
+    Label value_is_smi;
+    __ JumpIfSmi(value_reg, &value_is_smi);
+    __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    __ Branch(&transition_elements_kind, ne, scratch1, Operand(at));
+    __ bind(&value_is_smi);
+
+    // Check for the empty array, and preallocate a small backing store if
+    // possible.
+    __ lw(length_reg,
+          FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ lw(elements_reg,
+          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+    __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+    __ Branch(&check_capacity, ne, elements_reg, Operand(at));
+
+    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
+    __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
+                          TAG_OBJECT);
+
+    // Initialize the new FixedDoubleArray. Leave elements unitialized for
+    // efficiency, they are guaranteed to be initialized before use.
+    __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
+    __ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
+    __ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+    __ sw(scratch1,
+          FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+
+    // Install the new backing store in the JSArray.
+    __ sw(elements_reg,
+          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+    __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
+                        scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs,
+                        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+    // Increment the length of the array.
+    __ li(length_reg, Operand(Smi::FromInt(1)));
+    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ jmp(&finish_store);
+
+    __ bind(&check_capacity);
+    // Make sure that the backing store can hold additional elements.
+    __ lw(scratch1,
+          FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+    __ Branch(&slow, hs, length_reg, Operand(scratch1));
+
+    // Grow the array and finish the store.
+    __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
+    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ jmp(&finish_store);
+
+    __ bind(&slow);
+    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
+    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
+  }
 }
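With ALLOW_JSARRAY_GROWTH, both store stubs above handle the common append pattern themselves: a keyed store exactly at the current length grows the array by one element (preallocating a small backing store when the elements array is still empty), while anything further out still falls back to the runtime. Roughly:

    var a = [];
    a[0] = 1;    // key == length: the stub grows the array itself
    a[1] = 2;    // again key == length, length becomes 2
    a[10] = 3;   // growing by more than one element: handled by the runtime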

74
deps/v8/src/mirror-debugger.js

@@ -144,32 +144,32 @@ function inherits(ctor, superCtor) {

 // Type names of the different mirrors.
-const UNDEFINED_TYPE = 'undefined';
-const NULL_TYPE = 'null';
-const BOOLEAN_TYPE = 'boolean';
-const NUMBER_TYPE = 'number';
-const STRING_TYPE = 'string';
-const OBJECT_TYPE = 'object';
-const FUNCTION_TYPE = 'function';
-const REGEXP_TYPE = 'regexp';
-const ERROR_TYPE = 'error';
-const PROPERTY_TYPE = 'property';
-const FRAME_TYPE = 'frame';
-const SCRIPT_TYPE = 'script';
-const CONTEXT_TYPE = 'context';
-const SCOPE_TYPE = 'scope';
+var UNDEFINED_TYPE = 'undefined';
+var NULL_TYPE = 'null';
+var BOOLEAN_TYPE = 'boolean';
+var NUMBER_TYPE = 'number';
+var STRING_TYPE = 'string';
+var OBJECT_TYPE = 'object';
+var FUNCTION_TYPE = 'function';
+var REGEXP_TYPE = 'regexp';
+var ERROR_TYPE = 'error';
+var PROPERTY_TYPE = 'property';
+var FRAME_TYPE = 'frame';
+var SCRIPT_TYPE = 'script';
+var CONTEXT_TYPE = 'context';
+var SCOPE_TYPE = 'scope';

 // Maximum length when sending strings through the JSON protocol.
-const kMaxProtocolStringLength = 80;
+var kMaxProtocolStringLength = 80;

 // Different kind of properties.
-PropertyKind = {};
+var PropertyKind = {};
 PropertyKind.Named = 1;
 PropertyKind.Indexed = 2;

 // A copy of the PropertyType enum from global.h
-PropertyType = {};
+var PropertyType = {};
 PropertyType.Normal = 0;
 PropertyType.Field = 1;
 PropertyType.ConstantFunction = 2;

@@ -183,7 +183,7 @@ PropertyType.NullDescriptor = 9;

 // Different attributes for a property.
-PropertyAttribute = {};
+var PropertyAttribute = {};
 PropertyAttribute.None = NONE;
 PropertyAttribute.ReadOnly = READ_ONLY;
 PropertyAttribute.DontEnum = DONT_ENUM;

@@ -191,7 +191,7 @@ PropertyAttribute.DontDelete = DONT_DELETE;

 // A copy of the scope types from runtime.cc.
-ScopeType = { Global: 0,
+var ScopeType = { Global: 0,
               Local: 1,
               With: 2,
               Closure: 3,

@@ -1237,24 +1237,24 @@ PropertyMirror.prototype.isNative = function() {
 };

-const kFrameDetailsFrameIdIndex = 0;
-const kFrameDetailsReceiverIndex = 1;
-const kFrameDetailsFunctionIndex = 2;
-const kFrameDetailsArgumentCountIndex = 3;
-const kFrameDetailsLocalCountIndex = 4;
-const kFrameDetailsSourcePositionIndex = 5;
-const kFrameDetailsConstructCallIndex = 6;
-const kFrameDetailsAtReturnIndex = 7;
-const kFrameDetailsFlagsIndex = 8;
-const kFrameDetailsFirstDynamicIndex = 9;
+var kFrameDetailsFrameIdIndex = 0;
+var kFrameDetailsReceiverIndex = 1;
+var kFrameDetailsFunctionIndex = 2;
+var kFrameDetailsArgumentCountIndex = 3;
+var kFrameDetailsLocalCountIndex = 4;
+var kFrameDetailsSourcePositionIndex = 5;
+var kFrameDetailsConstructCallIndex = 6;
+var kFrameDetailsAtReturnIndex = 7;
+var kFrameDetailsFlagsIndex = 8;
+var kFrameDetailsFirstDynamicIndex = 9;

-const kFrameDetailsNameIndex = 0;
-const kFrameDetailsValueIndex = 1;
-const kFrameDetailsNameValueSize = 2;
+var kFrameDetailsNameIndex = 0;
+var kFrameDetailsValueIndex = 1;
+var kFrameDetailsNameValueSize = 2;

-const kFrameDetailsFlagDebuggerFrameMask = 1 << 0;
-const kFrameDetailsFlagOptimizedFrameMask = 1 << 1;
-const kFrameDetailsFlagInlinedFrameIndexMask = 7 << 2;
+var kFrameDetailsFlagDebuggerFrameMask = 1 << 0;
+var kFrameDetailsFlagOptimizedFrameMask = 1 << 1;
+var kFrameDetailsFlagInlinedFrameIndexMask = 7 << 2;

 /**
  * Wrapper for the frame details information retreived from the VM. The frame

@@ -1732,8 +1732,8 @@ FrameMirror.prototype.toText = function(opt_locals) {
 };

-const kScopeDetailsTypeIndex = 0;
-const kScopeDetailsObjectIndex = 1;
+var kScopeDetailsTypeIndex = 0;
+var kScopeDetailsObjectIndex = 1;

 function ScopeDetails(frame, index) {
   this.break_id_ = frame.break_id_;
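The const-to-var rewrite is presumably about const's stricter semantics (a V8 const binding cannot be redeclared or reassigned, and harmony scoping tightens this further), which is a poor fit for scripts that may be compiled into a context more than once. The difference, sketched:

    // Compiling this twice into the same context is harmless:
    var kMaxProtocolStringLength = 80;
    var kMaxProtocolStringLength = 80;  // no error, same binding
    // The equivalent 'const' redeclaration fails, which is why the
    // debugger scripts dropped it.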

13
deps/v8/src/objects-debug.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:

@@ -280,7 +280,9 @@ void JSObject::JSObjectVerify() {
               (map()->inobject_properties() + properties()->length() -
                map()->NextFreePropertyIndex()));
   }
-  ASSERT_EQ((map()->has_fast_elements() || map()->has_fast_smi_only_elements()),
+  ASSERT_EQ((map()->has_fast_elements() ||
+             map()->has_fast_smi_only_elements() ||
+             (elements() == GetHeap()->empty_fixed_array())),
             (elements()->map() == GetHeap()->fixed_array_map() ||
              elements()->map() == GetHeap()->fixed_cow_array_map()));
   ASSERT(map()->has_fast_elements() == HasFastElements());

@@ -324,6 +326,13 @@ void PolymorphicCodeCache::PolymorphicCodeCacheVerify() {
 }

+void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
+  VerifyObjectField(kIcTotalCountOffset);
+  VerifyObjectField(kIcWithTypeinfoCountOffset);
+  VerifyHeapPointer(type_feedback_cells());
+}
+
 void FixedArray::FixedArrayVerify() {
   for (int i = 0; i < length(); i++) {
     Object* e = get(i);

40
deps/v8/src/objects-inl.h

@@ -1339,11 +1339,12 @@ void JSObject::set_map_and_elements(Map* new_map,
     }
   }
   ASSERT((map()->has_fast_elements() ||
-          map()->has_fast_smi_only_elements()) ==
+          map()->has_fast_smi_only_elements() ||
+          (value == GetHeap()->empty_fixed_array())) ==
          (value->map() == GetHeap()->fixed_array_map() ||
           value->map() == GetHeap()->fixed_cow_array_map()));
-  ASSERT(map()->has_fast_double_elements() ==
-         value->IsFixedDoubleArray());
+  ASSERT((value == GetHeap()->empty_fixed_array()) ||
+         (map()->has_fast_double_elements() == value->IsFixedDoubleArray()));
   WRITE_FIELD(this, kElementsOffset, value);
   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
 }

@@ -2052,16 +2053,6 @@ void DescriptorArray::Set(int descriptor_number,
 }

-void DescriptorArray::CopyFrom(int index,
-                               DescriptorArray* src,
-                               int src_index,
-                               const WhitenessWitness& witness) {
-  Descriptor desc;
-  src->Get(src_index, &desc);
-  Set(index, &desc, witness);
-}
-
 void DescriptorArray::NoIncrementalWriteBarrierSwapDescriptors(
     int first, int second) {
   NoIncrementalWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));

@@ -3715,8 +3706,9 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
                kNameShouldPrintAsAnonymous)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, bound, kBoundFunction)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_crankshaft,
-               kDontCrankshaft)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_optimize,
+               kDontOptimize)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_inline, kDontInline)

 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)

@@ -3946,13 +3938,17 @@ MaybeObject* JSFunction::set_initial_map_and_cache_transitions(
   Map* new_double_map = NULL;
   if (!maybe_map->To<Map>(&new_double_map)) return maybe_map;
   new_double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
-  initial_map->AddElementsTransition(FAST_DOUBLE_ELEMENTS, new_double_map);
+  maybe_map = initial_map->AddElementsTransition(FAST_DOUBLE_ELEMENTS,
+                                                 new_double_map);
+  if (maybe_map->IsFailure()) return maybe_map;

   maybe_map = new_double_map->CopyDropTransitions();
   Map* new_object_map = NULL;
   if (!maybe_map->To<Map>(&new_object_map)) return maybe_map;
   new_object_map->set_elements_kind(FAST_ELEMENTS);
-  new_double_map->AddElementsTransition(FAST_ELEMENTS, new_object_map);
+  maybe_map = new_double_map->AddElementsTransition(FAST_ELEMENTS,
+                                                    new_object_map);
+  if (maybe_map->IsFailure()) return maybe_map;

   global_context->set_smi_js_array_map(initial_map);
   global_context->set_double_js_array_map(new_double_map);

@@ -4127,8 +4123,7 @@ INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
 ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
 ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
 ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-ACCESSORS(Code, type_feedback_cells, TypeFeedbackCells,
-          kTypeFeedbackCellsOffset)
+ACCESSORS(Code, type_feedback_info, Object, kTypeFeedbackInfoOffset)
 ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)

@@ -4804,6 +4799,13 @@ Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
 }

+SMI_ACCESSORS(TypeFeedbackInfo, ic_total_count, kIcTotalCountOffset)
+SMI_ACCESSORS(TypeFeedbackInfo, ic_with_typeinfo_count,
+              kIcWithTypeinfoCountOffset)
+ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
+          kTypeFeedbackCellsOffset)
+
 Relocatable::Relocatable(Isolate* isolate) {
   ASSERT(isolate == Isolate::Current());
   isolate_ = isolate;
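set_initial_map_and_cache_transitions now propagates allocation failures from AddElementsTransition instead of discarding the returned MaybeObject. The maps being wired up here back the standard elements-kind ladder for arrays, which in JavaScript terms looks roughly like:

    var a = [1, 2, 3];   // starts out with smi-only elements
    a[0] = 1.5;          // transitions to double elements
    a[1] = "two";        // transitions to generic object elements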

9
deps/v8/src/objects-printer.cc

@@ -554,6 +554,15 @@ void PolymorphicCodeCache::PolymorphicCodeCachePrint(FILE* out) {
 }

+void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "TypeFeedbackInfo");
+  PrintF(out, "\n - ic_total_count: %d, ic_with_typeinfo_count: %d",
+         ic_total_count(), ic_with_typeinfo_count());
+  PrintF(out, "\n - type_feedback_cells: ");
+  type_feedback_cells()->FixedArrayPrint(out);
+}
+
 void FixedArray::FixedArrayPrint(FILE* out) {
   HeapObject::PrintHeader(out, "FixedArray");
   PrintF(out, " - length: %d", length());

4
deps/v8/src/objects-visiting-inl.h

@@ -109,7 +109,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
   IteratePointer(v, kRelocationInfoOffset);
   IteratePointer(v, kHandlerTableOffset);
   IteratePointer(v, kDeoptimizationDataOffset);
-  IteratePointer(v, kTypeFeedbackCellsOffset);
+  IteratePointer(v, kTypeFeedbackInfoOffset);

   RelocIterator it(this, mode_mask);
   for (; !it.done(); it.next()) {

@@ -141,7 +141,7 @@ void Code::CodeIterateBody(Heap* heap) {
       reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
   StaticVisitor::VisitPointer(
       heap,
-      reinterpret_cast<Object**>(this->address() + kTypeFeedbackCellsOffset));
+      reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));

   RelocIterator it(this, mode_mask);
   for (; !it.done(); it.next()) {
