
Upgrade V8 to 3.6.6

v0.7.4-release
Ryan Dahl
commit 56e6952e63
  1. deps/v8/ChangeLog (55)
  2. deps/v8/Makefile (45)
  3. deps/v8/benchmarks/spinning-balls/index.html (11)
  4. deps/v8/benchmarks/spinning-balls/splay-tree.js (326)
  5. deps/v8/benchmarks/spinning-balls/v.js (387)
  6. deps/v8/build/common.gypi (12)
  7. deps/v8/build/standalone.gypi (31)
  8. deps/v8/include/v8-debug.h (5)
  9. deps/v8/include/v8-profiler.h (6)
  10. deps/v8/include/v8.h (42)
  11. deps/v8/src/SConscript (2)
  12. deps/v8/src/api.cc (130)
  13. deps/v8/src/arm/assembler-arm-inl.h (24)
  14. deps/v8/src/arm/assembler-arm.cc (12)
  15. deps/v8/src/arm/assembler-arm.h (10)
  16. deps/v8/src/arm/builtins-arm.cc (1118)
  17. deps/v8/src/arm/code-stubs-arm.cc (585)
  18. deps/v8/src/arm/code-stubs-arm.h (245)
  19. deps/v8/src/arm/codegen-arm.cc (8)
  20. deps/v8/src/arm/codegen-arm.h (10)
  21. deps/v8/src/arm/debug-arm.cc (82)
  22. deps/v8/src/arm/deoptimizer-arm.cc (34)
  23. deps/v8/src/arm/frames-arm.h (10)
  24. deps/v8/src/arm/full-codegen-arm.cc (195)
  25. deps/v8/src/arm/ic-arm.cc (149)
  26. deps/v8/src/arm/lithium-arm.cc (38)
  27. deps/v8/src/arm/lithium-arm.h (16)
  28. deps/v8/src/arm/lithium-codegen-arm.cc (182)
  29. deps/v8/src/arm/lithium-codegen-arm.h (7)
  30. deps/v8/src/arm/macro-assembler-arm.cc (566)
  31. deps/v8/src/arm/macro-assembler-arm.h (226)
  32. deps/v8/src/arm/regexp-macro-assembler-arm.cc (15)
  33. deps/v8/src/arm/simulator-arm.cc (2)
  34. deps/v8/src/arm/stub-cache-arm.cc (324)
  35. deps/v8/src/array.js (151)
  36. deps/v8/src/assembler.cc (56)
  37. deps/v8/src/assembler.h (27)
  38. deps/v8/src/ast.cc (154)
  39. deps/v8/src/ast.h (29)
  40. deps/v8/src/bootstrapper.cc (57)
  41. deps/v8/src/builtins.cc (146)
  42. deps/v8/src/cached-powers.cc (12)
  43. deps/v8/src/code-stubs.cc (45)
  44. deps/v8/src/code-stubs.h (117)
  45. deps/v8/src/codegen.cc (2)
  46. deps/v8/src/compiler-intrinsics.h (77)
  47. deps/v8/src/compiler.cc (3)
  48. deps/v8/src/contexts.cc (100)
  49. deps/v8/src/contexts.h (41)
  50. deps/v8/src/conversions-inl.h (2)
  51. deps/v8/src/conversions.h (2)
  52. deps/v8/src/cpu-profiler.cc (2)
  53. deps/v8/src/d8-debug.cc (5)
  54. deps/v8/src/d8.cc (34)
  55. deps/v8/src/debug.cc (226)
  56. deps/v8/src/debug.h (90)
  57. deps/v8/src/deoptimizer.cc (70)
  58. deps/v8/src/deoptimizer.h (18)
  59. deps/v8/src/disassembler.cc (2)
  60. deps/v8/src/elements.cc (11)
  61. deps/v8/src/execution.cc (175)
  62. deps/v8/src/execution.h (13)
  63. deps/v8/src/extensions/gc-extension.cc (7)
  64. deps/v8/src/factory.cc (96)
  65. deps/v8/src/factory.h (32)
  66. deps/v8/src/flag-definitions.h (24)
  67. deps/v8/src/frames-inl.h (67)
  68. deps/v8/src/frames.cc (117)
  69. deps/v8/src/frames.h (78)
  70. deps/v8/src/full-codegen.cc (41)
  71. deps/v8/src/full-codegen.h (23)
  72. deps/v8/src/func-name-inferrer.h (6)
  73. deps/v8/src/globals.h (4)
  74. deps/v8/src/handles.cc (13)
  75. deps/v8/src/handles.h (15)
  76. deps/v8/src/heap-inl.h (104)
  77. deps/v8/src/heap-profiler.cc (1)
  78. deps/v8/src/heap.cc (1531)
  79. deps/v8/src/heap.h (458)
  80. deps/v8/src/hydrogen-instructions.cc (66)
  81. deps/v8/src/hydrogen-instructions.h (344)
  82. deps/v8/src/hydrogen.cc (408)
  83. deps/v8/src/hydrogen.h (27)
  84. deps/v8/src/ia32/assembler-ia32-inl.h (26)
  85. deps/v8/src/ia32/assembler-ia32.cc (87)
  86. deps/v8/src/ia32/assembler-ia32.h (88)
  87. deps/v8/src/ia32/builtins-ia32.cc (1031)
  88. deps/v8/src/ia32/code-stubs-ia32.cc (1112)
  89. deps/v8/src/ia32/code-stubs-ia32.h (291)
  90. deps/v8/src/ia32/codegen-ia32.cc (46)
  91. deps/v8/src/ia32/debug-ia32.cc (95)
  92. deps/v8/src/ia32/deoptimizer-ia32.cc (100)
  93. deps/v8/src/ia32/disasm-ia32.cc (29)
  94. deps/v8/src/ia32/full-codegen-ia32.cc (323)
  95. deps/v8/src/ia32/ic-ia32.cc (160)
  96. deps/v8/src/ia32/lithium-codegen-ia32.cc (255)
  97. deps/v8/src/ia32/lithium-codegen-ia32.h (13)
  98. deps/v8/src/ia32/lithium-ia32.cc (64)
  99. deps/v8/src/ia32/lithium-ia32.h (28)
  100. deps/v8/src/ia32/macro-assembler-ia32.cc (737)

deps/v8/ChangeLog (55)

@ -1,3 +1,58 @@
2011-10-10: Version 3.6.6
Added a GC pause visualization tool.
Added presubmit=no and werror=no flags to Makefile.
ES5/Test262 conformance improvements.
Fixed compilation issues with GCC 4.5.x (issue 1743).
Bug fixes and performance improvements on all platforms.
2011-10-05: Version 3.6.5
New incremental garbage collector.
Removed the hard heap size limit (soft heap size limit is still
700/1400Mbytes by default).
Implemented ES5 generic Array.prototype.toString (Issue 1361).
V8 now allows surrogate pair codes in decodeURIComponent (Issue 1415).
Fixed x64 RegExp start-of-string bug (Issues 1746, 1748).
Fixed propertyIsEnumerable for numeric properties (Issue 1692).
Fixed the MinGW and Windows 2000 builds.
Fixed "Prototype chain is not searched if named property handler does
not set a property" (Issue 1636).
Made the RegExp.prototype object be a RegExp object (Issue 1217).
Disallowed future reserved words as labels in strict mode.
Fixed string split to correctly coerce the separator to a string
(Issue 1711).
API: Added an optional source length field to the Extension
constructor.
API: Added Debug::DisableAgent to match existing Debug::EnableAgent
(Issue 1573).
Added "native" target to Makefile for the benefit of Linux distros.
Fixed: debugger stops stepping outside evaluate (Issue 1639).
More work on ES-Harmony proxies. Still hidden behind a flag.
Bug fixes and performance improvements on all platforms.
2011-09-15: Version 3.6.4
Fixed d8's broken readline history.

deps/v8/Makefile (45)

@ -32,6 +32,7 @@ LINK ?= "g++"
OUTDIR ?= out
TESTJOBS ?= -j16
GYPFLAGS ?=
TESTFLAGS ?=
# Special build flags. Use them like this: "make library=shared"
@ -50,6 +51,10 @@ endif
ifeq ($(disassembler), on)
GYPFLAGS += -Dv8_enable_disassembler=1
endif
# objectprint=on
ifeq ($(objectprint), on)
GYPFLAGS += -Dv8_object_print=1
endif
# snapshot=off
ifeq ($(snapshot), off)
GYPFLAGS += -Dv8_use_snapshot='false'
@ -72,12 +77,21 @@ endif
ifdef soname_version
GYPFLAGS += -Dsoname_version=$(soname_version)
endif
# werror=no
ifeq ($(werror), no)
GYPFLAGS += -Dwerror=''
endif
# presubmit=no
ifeq ($(presubmit), no)
TESTFLAGS += --no-presubmit
endif
# ----------------- available targets: --------------------
# - "dependencies": pulls in external dependencies (currently: GYP)
# - any arch listed in ARCHES (see below)
# - any mode listed in MODES
# - every combination <arch>.<mode>, e.g. "ia32.release"
# - "native": current host's architecture, release mode
# - any of the above with .check appended, e.g. "ia32.release.check"
# - default (no target specified): build all ARCHES and MODES
# - "check": build all targets and run all tests
@ -103,7 +117,7 @@ CHECKS = $(addsuffix .check,$(BUILDS))
# File where previously used GYPFLAGS are stored.
ENVFILE = $(OUTDIR)/environment
.PHONY: all check clean dependencies $(ENVFILE).new \
.PHONY: all check clean dependencies $(ENVFILE).new native \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES))
@ -124,21 +138,31 @@ $(BUILDS): $(OUTDIR)/Makefile-$$(basename $$@)
python -c "print raw_input().capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
native: $(OUTDIR)/Makefile-native
@$(MAKE) -C "$(OUTDIR)" -f Makefile-native \
CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
builddir="$(shell pwd)/$(OUTDIR)/$@"
# Test targets.
check: all
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
$(TESTFLAGS)
$(addsuffix .check,$(MODES)): $$(basename $$@)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
--mode=$(basename $@)
--mode=$(basename $@) $(TESTFLAGS)
$(addsuffix .check,$(ARCHES)): $$(basename $$@)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch=$(basename $@)
--arch=$(basename $@) $(TESTFLAGS)
$(CHECKS): $$(basename $$@)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@)
--arch-and-mode=$(basename $@) $(TESTFLAGS)
native.check: native
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
# Clean targets. You can clean each architecture individually, or everything.
$(addsuffix .clean,$(ARCHES)):
@ -147,7 +171,12 @@ $(addsuffix .clean,$(ARCHES)):
rm -rf $(OUTDIR)/$(basename $@).debug
find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete
clean: $(addsuffix .clean,$(ARCHES))
native.clean:
rm -f $(OUTDIR)/Makefile-native
rm -rf $(OUTDIR)/native
find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete
clean: $(addsuffix .clean,$(ARCHES)) native.clean
# GYP file generation targets.
$(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE)
@ -165,6 +194,10 @@ $(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE)
-Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
-S-arm $(GYPFLAGS)
$(OUTDIR)/Makefile-native: $(GYPFILES) $(ENVFILE)
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS)
# Replaces the old with the new environment file if they're different, which
# will trigger GYP to regenerate Makefiles.
$(ENVFILE): $(ENVFILE).new

deps/v8/benchmarks/spinning-balls/index.html (11)

@ -0,0 +1,11 @@
<html>
<head>
<style>
body { text-align: center; }
</style>
</head>
<body>
<script type="text/javascript" src="splay-tree.js"></script>
<script type="text/javascript" src="v.js"></script>
</body>
</html>

deps/v8/benchmarks/spinning-balls/splay-tree.js (326)

@ -0,0 +1,326 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/**
* Constructs a Splay tree. A splay tree is a self-balancing binary
* search tree with the additional property that recently accessed
* elements are quick to access again. It performs basic operations
* such as insertion, look-up and removal in O(log(n)) amortized time.
*
* @constructor
*/
function SplayTree() {
};
/**
* Pointer to the root node of the tree.
*
* @type {SplayTree.Node}
* @private
*/
SplayTree.prototype.root_ = null;
/**
* @return {boolean} Whether the tree is empty.
*/
SplayTree.prototype.isEmpty = function() {
return !this.root_;
};
/**
* Inserts a node into the tree with the specified key and value if
* the tree does not already contain a node with the specified key. If
* the value is inserted, it becomes the root of the tree.
*
* @param {number} key Key to insert into the tree.
* @param {*} value Value to insert into the tree.
*/
SplayTree.prototype.insert = function(key, value) {
if (this.isEmpty()) {
this.root_ = new SplayTree.Node(key, value);
return;
}
// Splay on the key to move the last node on the search path for
// the key to the root of the tree.
this.splay_(key);
if (this.root_.key == key) {
return;
}
var node = new SplayTree.Node(key, value);
if (key > this.root_.key) {
node.left = this.root_;
node.right = this.root_.right;
this.root_.right = null;
} else {
node.right = this.root_;
node.left = this.root_.left;
this.root_.left = null;
}
this.root_ = node;
};
/**
* Removes a node with the specified key from the tree if the tree
* contains a node with this key. The removed node is returned. If the
* key is not found, an exception is thrown.
*
* @param {number} key Key to find and remove from the tree.
* @return {SplayTree.Node} The removed node.
*/
SplayTree.prototype.remove = function(key) {
if (this.isEmpty()) {
throw Error('Key not found: ' + key);
}
this.splay_(key);
if (this.root_.key != key) {
throw Error('Key not found: ' + key);
}
var removed = this.root_;
if (!this.root_.left) {
this.root_ = this.root_.right;
} else {
var right = this.root_.right;
this.root_ = this.root_.left;
// Splay to make sure that the new root has an empty right child.
this.splay_(key);
// Insert the original right child as the right child of the new
// root.
this.root_.right = right;
}
return removed;
};
/**
* Returns the node having the specified key or null if the tree doesn't contain
* a node with the specified key.
*
* @param {number} key Key to find in the tree.
* @return {SplayTree.Node} Node having the specified key.
*/
SplayTree.prototype.find = function(key) {
if (this.isEmpty()) {
return null;
}
this.splay_(key);
return this.root_.key == key ? this.root_ : null;
};
/**
* @return {SplayTree.Node} Node having the maximum key value.
*/
SplayTree.prototype.findMax = function(opt_startNode) {
if (this.isEmpty()) {
return null;
}
var current = opt_startNode || this.root_;
while (current.right) {
current = current.right;
}
return current;
};
/**
* @return {SplayTree.Node} Node having the maximum key value that
* is less than the specified key value.
*/
SplayTree.prototype.findGreatestLessThan = function(key) {
if (this.isEmpty()) {
return null;
}
// Splay on the key to move the node with the given key or the last
// node on the search path to the top of the tree.
this.splay_(key);
// Now the result is either the root node or the greatest node in
// the left subtree.
if (this.root_.key < key) {
return this.root_;
} else if (this.root_.left) {
return this.findMax(this.root_.left);
} else {
return null;
}
};
/**
* @return {Array<*>} An array containing all the keys of tree's nodes.
*/
SplayTree.prototype.exportKeys = function() {
var result = [];
if (!this.isEmpty()) {
this.root_.traverse_(function(node) { result.push(node.key); });
}
return result;
};
/**
* Perform the splay operation for the given key. Moves the node with
* the given key to the top of the tree. If no node has the given
* key, the last node on the search path is moved to the top of the
* tree. This is the simplified top-down splaying algorithm from:
* "Self-adjusting Binary Search Trees" by Sleator and Tarjan
*
* @param {number} key Key to splay the tree on.
* @private
*/
SplayTree.prototype.splay_ = function(key) {
if (this.isEmpty()) {
return;
}
// Create a dummy node. The use of the dummy node is a bit
// counter-intuitive: The right child of the dummy node will hold
// the L tree of the algorithm. The left child of the dummy node
// will hold the R tree of the algorithm. Using a dummy node, left
// and right will always be nodes and we avoid special cases.
var dummy, left, right;
dummy = left = right = new SplayTree.Node(null, null);
var current = this.root_;
while (true) {
if (key < current.key) {
if (!current.left) {
break;
}
if (key < current.left.key) {
// Rotate right.
var tmp = current.left;
current.left = tmp.right;
tmp.right = current;
current = tmp;
if (!current.left) {
break;
}
}
// Link right.
right.left = current;
right = current;
current = current.left;
} else if (key > current.key) {
if (!current.right) {
break;
}
if (key > current.right.key) {
// Rotate left.
var tmp = current.right;
current.right = tmp.left;
tmp.left = current;
current = tmp;
if (!current.right) {
break;
}
}
// Link left.
left.right = current;
left = current;
current = current.right;
} else {
break;
}
}
// Assemble.
left.right = current.left;
right.left = current.right;
current.left = dummy.right;
current.right = dummy.left;
this.root_ = current;
};
/**
* Constructs a Splay tree node.
*
* @param {number} key Key.
* @param {*} value Value.
*/
SplayTree.Node = function(key, value) {
this.key = key;
this.value = value;
};
/**
* @type {SplayTree.Node}
*/
SplayTree.Node.prototype.left = null;
/**
* @type {SplayTree.Node}
*/
SplayTree.Node.prototype.right = null;
/**
* Performs an ordered traversal of the subtree starting at
* this SplayTree.Node.
*
* @param {function(SplayTree.Node)} f Visitor function.
* @private
*/
SplayTree.Node.prototype.traverse_ = function(f) {
var current = this;
while (current) {
var left = current.left;
if (left) left.traverse_(f);
f(current);
current = current.right;
}
};
SplayTree.prototype.traverseBreadthFirst = function (f) {
if (f(this.root_.value)) return;
var stack = [this.root_];
var length = 1;
while (length > 0) {
var new_stack = new Array(stack.length * 2);
var new_length = 0;
for (var i = 0; i < length; i++) {
var n = stack[i];
var l = n.left;
var r = n.right;
if (l) {
if (f(l.value)) return;
new_stack[new_length++] = l;
}
if (r) {
if (f(r.value)) return;
new_stack[new_length++] = r;
}
}
stack = new_stack;
length = new_length;
}
};

deps/v8/benchmarks/spinning-balls/v.js (387)

@ -0,0 +1,387 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/**
* This function provides requestAnimationFrame in a cross browser way.
* http://paulirish.com/2011/requestanimationframe-for-smart-animating/
*/
if ( !window.requestAnimationFrame ) {
window.requestAnimationFrame = ( function() {
return window.webkitRequestAnimationFrame ||
window.mozRequestAnimationFrame ||
window.oRequestAnimationFrame ||
window.msRequestAnimationFrame ||
function(callback, element) {
window.setTimeout( callback, 1000 / 60 );
};
} )();
}
var kNPoints = 8000;
var kNModifications = 20;
var kNVisiblePoints = 200;
var kDecaySpeed = 20;
var kPointRadius = 4;
var kInitialLifeForce = 100;
var livePoints = void 0;
var dyingPoints = void 0;
var scene = void 0;
var renderingStartTime = void 0;
var scene = void 0;
var pausePlot = void 0;
var splayTree = void 0;
function Point(x, y, z, payload) {
this.x = x;
this.y = y;
this.z = z;
this.next = null;
this.prev = null;
this.payload = payload;
this.lifeForce = kInitialLifeForce;
}
Point.prototype.color = function () {
return "rgba(0, 0, 0, " + (this.lifeForce / kInitialLifeForce) + ")";
};
Point.prototype.decay = function () {
this.lifeForce -= kDecaySpeed;
return this.lifeForce <= 0;
};
function PointsList() {
this.head = null;
this.count = 0;
}
PointsList.prototype.add = function (point) {
if (this.head !== null) this.head.prev = point;
point.next = this.head;
this.head = point;
this.count++;
}
PointsList.prototype.remove = function (point) {
if (point.next !== null) {
point.next.prev = point.prev;
}
if (point.prev !== null) {
point.prev.next = point.next;
} else {
this.head = point.next;
}
this.count--;
}
function GeneratePayloadTree(depth, tag) {
if (depth == 0) {
return {
array : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ],
string : 'String for key ' + tag + ' in leaf node'
};
} else {
return {
left: GeneratePayloadTree(depth - 1, tag),
right: GeneratePayloadTree(depth - 1, tag)
};
}
}
// To make the benchmark results predictable, we replace Math.random
// with a 100% deterministic alternative.
Math.random = (function() {
var seed = 49734321;
return function() {
// Robert Jenkins' 32 bit integer hash function.
seed = ((seed + 0x7ed55d16) + (seed << 12)) & 0xffffffff;
seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff;
seed = ((seed + 0xd3a2646c) ^ (seed << 9)) & 0xffffffff;
seed = ((seed + 0xfd7046c5) + (seed << 3)) & 0xffffffff;
seed = ((seed ^ 0xb55a4f09) ^ (seed >>> 16)) & 0xffffffff;
return (seed & 0xfffffff) / 0x10000000;
};
})();
function GenerateKey() {
// The benchmark framework guarantees that Math.random is
// deterministic; see base.js.
return Math.random();
}
function CreateNewPoint() {
// Insert new node with a unique key.
var key;
do { key = GenerateKey(); } while (splayTree.find(key) != null);
var point = new Point(Math.random() * 40 - 20,
Math.random() * 40 - 20,
Math.random() * 40 - 20,
GeneratePayloadTree(5, "" + key));
livePoints.add(point);
splayTree.insert(key, point);
return key;
}
function ModifyPointsSet() {
if (livePoints.count < kNPoints) {
for (var i = 0; i < kNModifications; i++) {
CreateNewPoint();
}
} else if (kNModifications === 20) {
kNModifications = 80;
kDecay = 30;
}
for (var i = 0; i < kNModifications; i++) {
var key = CreateNewPoint();
var greatest = splayTree.findGreatestLessThan(key);
if (greatest == null) {
var point = splayTree.remove(key).value;
} else {
var point = splayTree.remove(greatest.key).value;
}
livePoints.remove(point);
point.payload = null;
dyingPoints.add(point);
}
}
function PausePlot(width, height, size) {
var canvas = document.createElement("canvas");
canvas.width = this.width = width;
canvas.height = this.height = height;
document.body.appendChild(canvas);
this.ctx = canvas.getContext('2d');
this.maxPause = 0;
this.size = size;
// Initialize cyclic buffer for pauses.
this.pauses = new Array(this.size);
this.start = this.size;
this.idx = 0;
}
PausePlot.prototype.addPause = function (p) {
if (this.idx === this.size) {
this.idx = 0;
}
if (this.idx === this.start) {
this.start++;
}
if (this.start === this.size) {
this.start = 0;
}
this.pauses[this.idx++] = p;
};
PausePlot.prototype.iteratePauses = function (f) {
if (this.start < this.idx) {
for (var i = this.start; i < this.idx; i++) {
f.call(this, i - this.start, this.pauses[i]);
}
} else {
for (var i = this.start; i < this.size; i++) {
f.call(this, i - this.start, this.pauses[i]);
}
var offs = this.size - this.start;
for (var i = 0; i < this.idx; i++) {
f.call(this, i + offs, this.pauses[i]);
}
}
};
PausePlot.prototype.draw = function () {
var first = null;
this.iteratePauses(function (i, v) {
if (first === null) {
first = v;
}
this.maxPause = Math.max(v, this.maxPause);
});
var dx = this.width / this.size;
var dy = this.height / this.maxPause;
this.ctx.save();
this.ctx.clearRect(0, 0, 480, 240);
this.ctx.beginPath();
this.ctx.moveTo(1, dy * this.pauses[this.start]);
var p = first;
this.iteratePauses(function (i, v) {
var delta = v - p;
var x = 1 + dx * i;
var y = dy * v;
this.ctx.lineTo(x, y);
if (delta > 2 * (p / 3)) {
this.ctx.font = "bold 12px sans-serif";
this.ctx.textBaseline = "bottom";
this.ctx.fillText(v + "ms", x + 2, y);
}
p = v;
});
this.ctx.strokeStyle = "black";
this.ctx.stroke();
this.ctx.restore();
}
function Scene(width, height) {
var canvas = document.createElement("canvas");
canvas.width = width;
canvas.height = height;
document.body.appendChild(canvas);
this.ctx = canvas.getContext('2d');
this.width = canvas.width;
this.height = canvas.height;
// Projection configuration.
this.x0 = canvas.width / 2;
this.y0 = canvas.height / 2;
this.z0 = 100;
this.f = 1000; // Focal length.
// Camera is rotating around y-axis.
this.angle = 0;
}
Scene.prototype.drawPoint = function (x, y, z, color) {
// Rotate the camera around y-axis.
var rx = x * Math.cos(this.angle) - z * Math.sin(this.angle);
var ry = y;
var rz = x * Math.sin(this.angle) + z * Math.cos(this.angle);
// Perform perspective projection.
var px = (this.f * rx) / (rz - this.z0) + this.x0;
var py = (this.f * ry) / (rz - this.z0) + this.y0;
this.ctx.save();
this.ctx.fillStyle = color
this.ctx.beginPath();
this.ctx.arc(px, py, kPointRadius, 0, 2 * Math.PI, true);
this.ctx.fill();
this.ctx.restore();
};
Scene.prototype.drawDyingPoints = function () {
var point_next = null;
for (var point = dyingPoints.head; point !== null; point = point_next) {
// Rotate the scene around y-axis.
scene.drawPoint(point.x, point.y, point.z, point.color());
point_next = point.next;
// Decay the current point and remove it from the list
// if it's life-force ran out.
if (point.decay()) {
dyingPoints.remove(point);
}
}
};
Scene.prototype.draw = function () {
this.ctx.save();
this.ctx.clearRect(0, 0, this.width, this.height);
this.drawDyingPoints();
this.ctx.restore();
this.angle += Math.PI / 90.0;
};
function render() {
if (typeof renderingStartTime === 'undefined') {
renderingStartTime = Date.now();
}
ModifyPointsSet();
scene.draw();
var renderingEndTime = Date.now();
var pause = renderingEndTime - renderingStartTime;
pausePlot.addPause(pause);
renderingStartTime = renderingEndTime;
pausePlot.draw();
div.innerHTML =
livePoints.count + "/" + dyingPoints.count + " " +
pause + "(max = " + pausePlot.maxPause + ") ms" ;
// Schedule next frame.
requestAnimationFrame(render);
}
function init() {
livePoints = new PointsList;
dyingPoints = new PointsList;
splayTree = new SplayTree();
scene = new Scene(640, 480);
div = document.createElement("div");
document.body.appendChild(div);
pausePlot = new PausePlot(480, 240, 160);
}
init();
render();

deps/v8/build/common.gypi (12)

@ -60,6 +60,8 @@
'v8_enable_disassembler%': 0,
'v8_object_print%': 0,
'v8_enable_gdbjit%': 0,
# Enable profiling support. Only required on Windows.
@ -72,6 +74,7 @@
'v8_use_snapshot%': 'true',
'host_os%': '<(OS)',
'v8_use_liveobjectlist%': 'false',
'werror%': '-Werror',
# For a shared library build, results in "libv8-<(soname_version).so".
'soname_version%': '',
@ -84,6 +87,9 @@
['v8_enable_disassembler==1', {
'defines': ['ENABLE_DISASSEMBLER',],
}],
['v8_object_print==1', {
'defines': ['OBJECT_PRINT',],
}],
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
@ -185,7 +191,7 @@
],
}],
['OS=="solaris"', {
'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
}],
],
'configurations': {
@ -221,7 +227,7 @@
'cflags': [ '-I/usr/local/include' ],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor' ],
}],
],
@ -264,7 +270,7 @@
}],
['OS=="win"', {
'msvs_configuration_attributes': {
'OutputDirectory': '$(SolutionDir)$(ConfigurationName)',
'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},

deps/v8/build/standalone.gypi (31)

@ -35,25 +35,30 @@
'msvs_multi_core_compile%': '1',
'variables': {
'variables': {
'conditions': [
[ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
# This handles the Linux platforms we generally deal with. Anything
# else gets passed through, which probably won't work very well; such
# hosts should pass an explicit target_arch to gyp.
'host_arch%':
'<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")',
}, { # OS!="linux" and OS!="freebsd" and OS!="openbsd"
'host_arch%': 'ia32',
}],
],
'variables': {
'conditions': [
[ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
# This handles the Linux platforms we generally deal with. Anything
# else gets passed through, which probably won't work very well; such
# hosts should pass an explicit target_arch to gyp.
'host_arch%':
'<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")',
}, { # OS!="linux" and OS!="freebsd" and OS!="openbsd"
'host_arch%': 'ia32',
}],
],
},
'host_arch%': '<(host_arch)',
'target_arch%': '<(host_arch)',
},
'host_arch%': '<(host_arch)',
'target_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(target_arch)',
},
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(v8_target_arch)',
'werror%': '-Werror',
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="x64" and host_arch!="x64")', {
@ -74,7 +79,7 @@
'conditions': [
[ 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
'target_defaults': {
'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-pthread', '-fno-rtti',
'-fno-exceptions', '-pedantic' ],
'ldflags': [ '-pthread', ],

deps/v8/include/v8-debug.h (5)

@ -339,6 +339,11 @@ class EXPORT Debug {
static bool EnableAgent(const char* name, int port,
bool wait_for_connection = false);
/**
* Disable the V8 builtin debug agent. The TCP/IP connection will be closed.
*/
static void DisableAgent();
/**
* Makes V8 process all pending debug messages.
*
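A minimal sketch of how an embedder might pair the new Debug::DisableAgent() with the existing EnableAgent(), assuming a V8 build with debugger support; the agent name and port below are placeholder values, not anything mandated by the API.

#include <v8.h>
#include <v8-debug.h>

int main() {
  v8::HandleScope handle_scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);

  // Start the built-in TCP/IP debug agent (this call predates 3.6.6).
  v8::Debug::EnableAgent("example-embedder", 5858, false);

  // ... compile and run scripts, service debugger connections ...

  // New in this upgrade: shut the agent down and close its TCP/IP
  // connection before tearing down V8.
  v8::Debug::DisableAgent();

  context.Dispose();
  return 0;
}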

deps/v8/include/v8-profiler.h (6)

@ -307,6 +307,12 @@ class V8EXPORT HeapGraphNode {
* path from the snapshot root to the current node.
*/
const HeapGraphNode* GetDominatorNode() const;
/**
* Finds and returns a value from the heap corresponding to this node,
* if the value is still reachable.
*/
Handle<Value> GetHeapValue() const;
};
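A short sketch of what the new HeapGraphNode::GetHeapValue() enables: mapping heap-snapshot nodes back to live JS values. The snapshot title and the traversal below are illustrative only.

#include <v8.h>
#include <v8-profiler.h>

void InspectRootChildren() {
  v8::HandleScope handle_scope;
  const v8::HeapSnapshot* snapshot =
      v8::HeapProfiler::TakeSnapshot(v8::String::New("example-snapshot"));
  const v8::HeapGraphNode* root = snapshot->GetRoot();
  for (int i = 0; i < root->GetChildrenCount(); i++) {
    const v8::HeapGraphNode* node = root->GetChild(i)->GetToNode();
    // Per the api.cc implementation further down, GetHeapValue() returns
    // undefined when the underlying object is no longer reachable.
    v8::Handle<v8::Value> value = node->GetHeapValue();
    if (!value.IsEmpty() && !value->IsUndefined()) {
      // ... inspect the recovered value ...
    }
  }
}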

deps/v8/include/v8.h (42)

@ -1171,7 +1171,8 @@ class String : public Primitive {
* Get the ExternalAsciiStringResource for an external ASCII string.
* Returns NULL if IsExternalAscii() doesn't return true.
*/
V8EXPORT ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
V8EXPORT const ExternalAsciiStringResource* GetExternalAsciiStringResource()
const;
static inline String* Cast(v8::Value* obj);
@ -2451,24 +2452,42 @@ class V8EXPORT TypeSwitch : public Data {
// --- Extensions ---
class V8EXPORT ExternalAsciiStringResourceImpl
: public String::ExternalAsciiStringResource {
public:
ExternalAsciiStringResourceImpl() : data_(0), length_(0) {}
ExternalAsciiStringResourceImpl(const char* data, size_t length)
: data_(data), length_(length) {}
const char* data() const { return data_; }
size_t length() const { return length_; }
private:
const char* data_;
size_t length_;
};
/**
* Ignore
*/
class V8EXPORT Extension { // NOLINT
public:
// Note that the strings passed into this constructor must live as long
// as the Extension itself.
Extension(const char* name,
const char* source = 0,
int dep_count = 0,
const char** deps = 0);
const char** deps = 0,
int source_length = -1);
virtual ~Extension() { }
virtual v8::Handle<v8::FunctionTemplate>
GetNativeFunction(v8::Handle<v8::String> name) {
return v8::Handle<v8::FunctionTemplate>();
}
const char* name() { return name_; }
const char* source() { return source_; }
const char* name() const { return name_; }
size_t source_length() const { return source_length_; }
const String::ExternalAsciiStringResource* source() const {
return &source_; }
int dependency_count() { return dep_count_; }
const char** dependencies() { return deps_; }
void set_auto_enable(bool value) { auto_enable_ = value; }
@ -2476,7 +2495,8 @@ class V8EXPORT Extension { // NOLINT
private:
const char* name_;
const char* source_;
size_t source_length_; // expected to initialize before source_
ExternalAsciiStringResourceImpl source_;
int dep_count_;
const char** deps_;
bool auto_enable_;
@ -3498,9 +3518,9 @@ class V8EXPORT Context {
*
* v8::Locker is a scoped lock object. While it's
* active (i.e. between its construction and destruction) the current thread is
* allowed to use the locked isolate. V8 guarantees that an isolate can be locked
* by at most one thread at any time. In other words, the scope of a v8::Locker is
* a critical section.
* allowed to use the locked isolate. V8 guarantees that an isolate can be
* locked by at most one thread at any time. In other words, the scope of a
* v8::Locker is a critical section.
*
* Sample usage:
* \code
@ -3602,8 +3622,8 @@ class V8EXPORT Locker {
static void StopPreemption();
/**
* Returns whether or not the locker for a given isolate, or default isolate if NULL is given,
* is locked by the current thread.
* Returns whether or not the locker for a given isolate, or default isolate
* if NULL is given, is locked by the current thread.
*/
static bool IsLocked(Isolate* isolate = NULL);
@ -3769,7 +3789,7 @@ class Internals {
static const int kFullStringRepresentationMask = 0x07;
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kJSObjectType = 0xa3;
static const int kJSObjectType = 0xa6;
static const int kFirstNonstringType = 0x80;
static const int kForeignType = 0x85;
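The Extension changes above (optional source_length, const accessors) can be exercised with a sketch like the following; the extension name, source, and native function are made-up examples, and registration still goes through the existing RegisterExtension().

#include <v8.h>
#include <cstring>

// The source string must outlive the Extension (see the constructor comment
// in the diff above); a static buffer satisfies that.
static const char kExampleSource[] = "native function exampleNative();";

static v8::Handle<v8::Value> ExampleNative(const v8::Arguments& args) {
  return v8::Undefined();
}

class ExampleExtension : public v8::Extension {
 public:
  ExampleExtension()
      : v8::Extension("v8/example-extension",
                      kExampleSource,
                      0, NULL,
                      // New optional argument: pass the length explicitly
                      // instead of having the constructor call strlen().
                      static_cast<int>(strlen(kExampleSource))) {}

  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
      v8::Handle<v8::String> name) {
    // Only one native function is declared in kExampleSource.
    return v8::FunctionTemplate::New(ExampleNative);
  }
};

static ExampleExtension example_extension;

void RegisterExampleExtension() {
  v8::RegisterExtension(&example_extension);
}

Passing -1 (the default) keeps the old behaviour, where the constructor derives the length from the NUL-terminated source.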

deps/v8/src/SConscript (2)

@ -84,6 +84,7 @@ SOURCES = {
hydrogen.cc
hydrogen-instructions.cc
ic.cc
incremental-marking.cc
inspector.cc
interpreter-irregexp.cc
isolate.cc
@ -133,6 +134,7 @@ SOURCES = {
v8utils.cc
variables.cc
version.cc
store-buffer.cc
zone.cc
extensions/gc-extension.cc
extensions/externalize-string-extension.cc

deps/v8/src/api.cc (130)

@ -185,7 +185,10 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
int end_marker;
heap_stats.end_marker = &end_marker;
i::Isolate* isolate = i::Isolate::Current();
isolate->heap()->RecordStats(&heap_stats, take_snapshot);
// BUG(1718):
// Don't use the take_snapshot since we don't support HeapIterator here
// without doing a special GC.
isolate->heap()->RecordStats(&heap_stats, false);
i::V8::SetFatalError();
FatalErrorCallback callback = GetFatalErrorHandler();
{
@ -501,9 +504,12 @@ void RegisterExtension(Extension* that) {
Extension::Extension(const char* name,
const char* source,
int dep_count,
const char** deps)
const char** deps,
int source_length)
: name_(name),
source_(source),
source_length_(source_length >= 0 ?
source_length : (source ? strlen(source) : 0)),
source_(source, source_length_),
dep_count_(dep_count),
deps_(deps),
auto_enable_(false) { }
@ -1781,7 +1787,7 @@ v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> recv,
int argc,
i::Object** argv[],
i::Handle<i::Object> argv[],
bool* has_pending_exception) {
i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name);
@ -1798,10 +1804,10 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name,
static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> data,
bool* has_pending_exception) {
i::Object** argv[1] = { data.location() };
i::Handle<i::Object> argv[] = { data };
return CallV8HeapFunction(name,
i::Isolate::Current()->js_builtins_object(),
1,
ARRAY_SIZE(argv),
argv,
has_pending_exception);
}
@ -2621,10 +2627,11 @@ bool Value::Equals(Handle<Value> that) const {
if (obj->IsJSObject() && other->IsJSObject()) {
return *obj == *other;
}
i::Object** args[1] = { other.location() };
i::Handle<i::Object> args[] = { other };
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result =
CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception);
CallV8HeapFunction("EQUALS", obj, ARRAY_SIZE(args), args,
&has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, false);
return *result == i::Smi::FromInt(i::EQUAL);
}
@ -3204,21 +3211,10 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
self,
i::JSObject::ALLOW_CREATION));
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::SetProperty(
hidden_props,
key_obj,
value_obj,
static_cast<PropertyAttributes>(None),
i::kNonStrictMode);
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
i::Handle<i::Object> result = i::SetHiddenProperty(self, key_obj, value_obj);
return *result == *self;
}
@ -3228,20 +3224,9 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
return Local<v8::Value>());
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
self,
i::JSObject::OMIT_CREATION));
if (hidden_props->IsUndefined()) {
return v8::Local<v8::Value>();
}
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result = i::GetProperty(hidden_props, key_obj);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, v8::Local<v8::Value>());
if (result->IsUndefined()) {
return v8::Local<v8::Value>();
}
i::Handle<i::Object> result(self->GetHiddenProperty(*key_obj));
if (result->IsUndefined()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
}
@ -3252,15 +3237,9 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
self,
i::JSObject::OMIT_CREATION));
if (hidden_props->IsUndefined()) {
return true;
}
i::Handle<i::JSObject> js_obj(i::JSObject::cast(*hidden_props));
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
return i::DeleteProperty(js_obj, key_obj)->IsTrue();
self->DeleteHiddenProperty(*key_obj);
return true;
}
@ -3310,22 +3289,12 @@ void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
i::Handle<i::ExternalArray> array =
isolate->factory()->NewExternalArray(length, array_type, data);
// If the object already has external elements, create a new, unique
// map if the element type is now changing, because assumptions about
// generated code based on the receiver's map will be invalid.
i::Handle<i::HeapObject> elements(object->elements());
bool cant_reuse_map =
elements->map()->IsUndefined() ||
!elements->map()->has_external_array_elements() ||
elements->map() != isolate->heap()->MapForExternalArrayType(array_type);
if (cant_reuse_map) {
i::Handle<i::Map> external_array_map =
isolate->factory()->GetElementsTransitionMap(
i::Handle<i::Map>(object->map()),
GetElementsKindFromExternalArrayType(array_type),
object->HasFastProperties());
object->set_map(*external_array_map);
}
i::Handle<i::Map> external_array_map =
isolate->factory()->GetElementsTransitionMap(
object,
GetElementsKindFromExternalArrayType(array_type));
object->set_map(*external_array_map);
object->set_elements(*array);
}
@ -3484,7 +3453,8 @@ bool v8::Object::IsCallable() {
}
Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc,
Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
int argc,
v8::Handle<v8::Value> argv[]) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::CallAsFunction()",
@ -3495,7 +3465,7 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc,
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Object*** args = reinterpret_cast<i::Object***>(argv);
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>();
if (obj->IsJSFunction()) {
fun = i::Handle<i::JSFunction>::cast(obj);
@ -3525,7 +3495,7 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Object*** args = reinterpret_cast<i::Object***>(argv);
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
if (obj->IsJSFunction()) {
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
EXCEPTION_PREAMBLE(isolate);
@ -3567,7 +3537,7 @@ Local<v8::Object> Function::NewInstance(int argc,
HandleScope scope;
i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Object*** args = reinterpret_cast<i::Object***>(argv);
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::New(function, argc, args, &has_pending_exception);
@ -3588,7 +3558,7 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Object*** args = reinterpret_cast<i::Object***>(argv);
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
@ -3799,10 +3769,11 @@ bool v8::String::IsExternalAscii() const {
void v8::String::VerifyExternalStringResource(
v8::String::ExternalStringResource* value) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
v8::String::ExternalStringResource* expected;
const v8::String::ExternalStringResource* expected;
if (i::StringShape(*str).IsExternalTwoByte()) {
void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
expected = reinterpret_cast<ExternalStringResource*>(resource);
const void* resource =
i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
expected = reinterpret_cast<const ExternalStringResource*>(resource);
} else {
expected = NULL;
}
@ -3810,7 +3781,7 @@ void v8::String::VerifyExternalStringResource(
}
v8::String::ExternalAsciiStringResource*
const v8::String::ExternalAsciiStringResource*
v8::String::GetExternalAsciiStringResource() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (IsDeadCheck(str->GetIsolate(),
@ -3818,8 +3789,9 @@ v8::String::ExternalAsciiStringResource*
return NULL;
}
if (i::StringShape(*str).IsExternalAscii()) {
void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
return reinterpret_cast<ExternalAsciiStringResource*>(resource);
const void* resource =
i::Handle<i::ExternalAsciiString>::cast(str)->resource();
return reinterpret_cast<const ExternalAsciiStringResource*>(resource);
} else {
return NULL;
}
@ -4009,7 +3981,7 @@ bool v8::V8::IdleNotification() {
void v8::V8::LowMemoryNotification() {
i::Isolate* isolate = i::Isolate::Current();
if (!isolate->IsInitialized()) return;
isolate->heap()->CollectAllGarbage(true);
isolate->heap()->CollectAllAvailableGarbage();
}
@ -5480,6 +5452,12 @@ bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) {
wait_for_connection);
}
void Debug::DisableAgent() {
return i::Isolate::Current()->debugger()->StopAgent();
}
void Debug::ProcessDebugMessages() {
i::Execution::ProcessDebugMesssages(true);
}
@ -5804,6 +5782,16 @@ const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
}
v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue");
i::Handle<i::HeapObject> object = ToInternal(this)->GetHeapObject();
return v8::Handle<Value>(!object.is_null() ?
ToApi<Value>(object) : ToApi<Value>(
isolate->factory()->undefined_value()));
}
static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
return const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(snapshot));

deps/v8/src/arm/assembler-arm-inl.h (24)

@ -77,6 +77,11 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
if (host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
@ -101,6 +106,10 @@ Object** RelocInfo::target_object_address() {
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
if (host() != NULL && target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), &Memory::Object_at(pc_), HeapObject::cast(target));
}
}
@ -131,6 +140,12 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
if (host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), NULL, cell);
}
}
@ -147,6 +162,11 @@ void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
@ -195,7 +215,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitPointer(target_object_address());
visitor->VisitEmbeddedPointer(host(), target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
@ -221,7 +241,7 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitPointer(heap, target_object_address());
StaticVisitor::VisitEmbeddedPointer(heap, host(), target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {

deps/v8/src/arm/assembler-arm.cc (12)

@ -78,7 +78,9 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
void CpuFeatures::Probe() {
ASSERT(!initialized_);
unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
CpuFeaturesImpliedByCompiler());
ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
initialized_ = true;
#endif
@ -86,8 +88,7 @@ void CpuFeatures::Probe() {
// Get the features implied by the OS and the compiler settings. This is the
// minimal set of features which is also alowed for generated code in the
// snapshot.
supported_ |= OS::CpuFeaturesImpliedByPlatform();
supported_ |= CpuFeaturesImpliedByCompiler();
supported_ |= standard_features;
if (Serializer::enabled()) {
// No probing for features if we might serialize (generate snapshot).
@ -2505,7 +2506,8 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
// We do not try to reuse pool constants.
RelocInfo rinfo(pc_, rmode, data, NULL);
if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
// Adjust code for new modes.
ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@ -2537,7 +2539,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {

deps/v8/src/arm/assembler-arm.h (10)

@ -1209,6 +1209,10 @@ class Assembler : public AssemblerBase {
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
void instr_at_put(int pos, Instr instr) {
*reinterpret_cast<Instr*>(buffer_ + pos) = instr;
}
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
@ -1263,12 +1267,6 @@ class Assembler : public AssemblerBase {
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
void instr_at_put(int pos, Instr instr) {
*reinterpret_cast<Instr*>(buffer_ + pos) = instr;
}
// Decode branch instruction at pos and return branch target pos
int target_at(int pos);

deps/v8/src/arm/builtins-arm.cc (1118)

File diff suppressed because it is too large

deps/v8/src/arm/code-stubs-arm.cc (585)

@ -189,6 +189,72 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
// [sp]: function.
// [sp + kPointerSize]: serialized scope info
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
__ AllocateInNewSpace(FixedArray::SizeFor(length),
r0, r1, r2, &gc, TAG_OBJECT);
// Load the function from the stack.
__ ldr(r3, MemOperand(sp, 0));
// Load the serialized scope info from the stack.
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
// Setup the object header.
__ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
// If this block context is nested in the global context we get a smi
// sentinel instead of a function. The block context should get the
// canonical empty function of the global context as its closure which
// we still have to look up.
Label after_sentinel;
__ JumpIfNotSmi(r3, &after_sentinel);
if (FLAG_debug_code) {
const char* message = "Expected 0 as a Smi sentinel";
__ cmp(r3, Operand::Zero());
__ Assert(eq, message);
}
__ ldr(r3, GlobalObjectOperand());
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
__ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
// Setup the fixed slots.
__ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
__ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
__ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
// Copy the global object from the previous context.
__ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
__ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));
// Initialize the rest of the slots to the hole value.
__ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
for (int i = 0; i < slots_; i++) {
__ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
}
// Remove the on-stack argument and return.
__ mov(cp, r0);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
// Need to collect. Call into runtime system.
__ bind(&gc);
__ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
@ -838,9 +904,11 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
// Call C routine that may not cause GC or other trouble.
__ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
0, 2);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(
ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
}
// Store answer in the overwritable heap number. Double returned in
// registers r0 and r1 or in d0.
if (masm->use_eabi_hardfloat()) {
@ -857,6 +925,29 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
}
bool WriteInt32ToHeapNumberStub::IsPregenerated() {
// These variants are compiled ahead of time. See next method.
if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
return true;
}
if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
return true;
}
// Other register combinations are generated as and when they are needed,
// so it is unsafe to call them from stubs (we can't generate a stub while
// we are generating a stub).
return false;
}
void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
stub1.GetCode()->set_is_pregenerated(true);
stub2.GetCode()->set_is_pregenerated(true);
}
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@ -1197,6 +1288,8 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
0, 2);
__ pop(pc); // Return.
@ -1214,7 +1307,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
// FIRST_SPEC_OBJECT_TYPE.
@ -1606,6 +1699,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
// This stub uses VFP3 instructions.
CpuFeatures::Scope scope(VFP3);
@ -1713,6 +1808,41 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
}
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
__ stm(db_w, sp, kCallerSaved | lr.bit());
if (save_doubles_ == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
__ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
__ vstr(reg, MemOperand(sp, i * kDoubleSize));
}
}
const int argument_count = 1;
const int fp_argument_count = 0;
const Register scratch = r1;
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
__ mov(r0, Operand(ExternalReference::isolate_address()));
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
__ vldr(reg, MemOperand(sp, i * kDoubleSize));
}
__ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
}
__ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
}
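The stub above brackets a GC-free C call by pushing the caller-saved core registers and, when requested, parking every VFP double at byte offset i * kDoubleSize in a manually reserved stack block. A small sketch of that save/restore layout with plain memory standing in for the stack (the register count here is a stand-in, not the real DwVfpRegister::kNumRegisters):

#include <cstring>
#include <vector>

constexpr int kDoubleSize = 8;
constexpr int kNumDoubleRegs = 16;  // illustrative count only

// Reserve one block and store register i at offset i * kDoubleSize,
// mirroring the vstr loop in the stub above.
void SaveDoubles(const double (&regs)[kNumDoubleRegs],
                 std::vector<unsigned char>* block) {
  block->resize(kDoubleSize * kNumDoubleRegs);
  for (int i = 0; i < kNumDoubleRegs; i++)
    std::memcpy(block->data() + i * kDoubleSize, &regs[i], sizeof(double));
}

// Mirrors the vldr loop: read each register back from its fixed slot.
void RestoreDoubles(double (&regs)[kNumDoubleRegs],
                    const std::vector<unsigned char>& block) {
  for (int i = 0; i < kNumDoubleRegs; i++)
    std::memcpy(&regs[i], block.data() + i * kDoubleSize, sizeof(double));
}

int main() {
  double regs[kNumDoubleRegs] = {1.5, 2.5};
  std::vector<unsigned char> block;
  SaveDoubles(regs, &block);
  regs[0] = 0.0;
  RestoreDoubles(regs, block);
  return regs[0] == 1.5 ? 0 : 1;
}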
void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
@ -1866,12 +1996,13 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
__ EnterInternalFrame();
__ push(r0);
__ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(r1, Operand(r0));
__ pop(r0);
__ LeaveInternalFrame();
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(r0);
__ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(r1, Operand(r0));
__ pop(r0);
}
__ bind(&heapnumber_allocated);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
@ -1912,13 +2043,14 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
__ EnterInternalFrame();
__ push(r0); // Push the heap number, not the untagged int32.
__ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(r2, r0); // Move the new heap number into r2.
// Get the heap number into r0, now that the new heap number is in r2.
__ pop(r0);
__ LeaveInternalFrame();
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(r0); // Push the heap number, not the untagged int32.
__ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(r2, r0); // Move the new heap number into r2.
// Get the heap number into r0, now that the new heap number is in r2.
__ pop(r0);
}
// Convert the heap number in r0 to an untagged integer in r1.
// This can't go slow-case because it's the same number we already
@ -2028,6 +2160,10 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
void BinaryOpStub::Generate(MacroAssembler* masm) {
// Explicitly allow generation of nested stubs. It is safe here because
// generation code does not use any raw pointers.
AllowStubCallsScope allow_stub_calls(masm, true);
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@ -3133,10 +3269,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
__ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ EnterInternalFrame();
__ push(r0);
__ CallRuntime(RuntimeFunction(), 1);
__ LeaveInternalFrame();
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(r0);
__ CallRuntime(RuntimeFunction(), 1);
}
__ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ Ret();
@ -3149,14 +3286,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// We return the value in d2 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
__ EnterInternalFrame();
// Allocate an aligned object larger than a HeapNumber.
ASSERT(4 * kPointerSize >= HeapNumber::kSize);
__ mov(scratch0, Operand(4 * kPointerSize));
__ push(scratch0);
__ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
__ LeaveInternalFrame();
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Allocate an aligned object larger than a HeapNumber.
ASSERT(4 * kPointerSize >= HeapNumber::kSize);
__ mov(scratch0, Operand(4 * kPointerSize));
__ push(scratch0);
__ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
}
__ Ret();
}
}
@ -3173,6 +3311,7 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
} else {
__ vmov(r0, r1, d2);
}
AllowExternalCallThatCantCauseGC scope(masm);
switch (type_) {
case TranscendentalCache::SIN:
__ CallCFunction(ExternalReference::math_sin_double_function(isolate),
@ -3268,11 +3407,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
__ PrepareCallCFunction(1, 1, scratch);
__ SetCallCDoubleArguments(double_base, exponent);
__ CallCFunction(
ExternalReference::power_double_int_function(masm->isolate()),
1, 1);
__ pop(lr);
__ GetCFunctionDoubleResult(double_result);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(
ExternalReference::power_double_int_function(masm->isolate()),
1, 1);
__ pop(lr);
__ GetCFunctionDoubleResult(double_result);
}
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
@ -3298,11 +3440,14 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
0, 2);
__ pop(lr);
__ GetCFunctionDoubleResult(double_result);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
0, 2);
__ pop(lr);
__ GetCFunctionDoubleResult(double_result);
}
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
@ -3319,6 +3464,37 @@ bool CEntryStub::NeedsImmovableCode() {
}
bool CEntryStub::IsPregenerated() {
return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
result_size_ == 1;
}
void CodeStub::GenerateStubsAheadOfTime() {
CEntryStub::GenerateAheadOfTime();
WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
}
void CodeStub::GenerateFPStubs() {
CEntryStub save_doubles(1, kSaveFPRegs);
Handle<Code> code = save_doubles.GetCode();
code->set_is_pregenerated(true);
StoreBufferOverflowStub stub(kSaveFPRegs);
stub.GetCode()->set_is_pregenerated(true);
code->GetIsolate()->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime() {
CEntryStub stub(1, kDontSaveFPRegs);
Handle<Code> code = stub.GetCode();
code->set_is_pregenerated(true);
}
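GenerateFPStubs() is the lazy counterpart of the ahead-of-time path: once it has run, the isolate-wide flag makes the save-doubles variants report themselves as pregenerated. A compressed model of that once-per-isolate handshake (the Isolate and stub types below are stand-ins, not the V8 classes):

struct FakeIsolate {
  bool fp_stubs_generated = false;
};

struct FakeStub {
  bool needs_fp;
  // Pregenerated if it never touches FP state, or if the FP variants
  // have already been emitted for this isolate.
  bool IsPregenerated(const FakeIsolate& isolate) const {
    return !needs_fp || isolate.fp_stubs_generated;
  }
};

void GenerateFPStubs(FakeIsolate* isolate) {
  // ... emit the save-doubles CEntry and StoreBufferOverflow variants ...
  isolate->fp_stubs_generated = true;  // from now on they count as pregenerated
}

int main() {
  FakeIsolate isolate;
  FakeStub fp_stub{true};
  bool before = fp_stub.IsPregenerated(isolate);  // false
  GenerateFPStubs(&isolate);
  bool after = fp_stub.IsPregenerated(isolate);   // true
  return (!before && after) ? 0 : 1;
}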
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(r0);
}
@ -3430,8 +3606,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ b(eq, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
__ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
__ ldr(r3, MemOperand(ip));
__ mov(r3, Operand(isolate->factory()->the_hole_value()));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(ip));
@ -3469,6 +3644,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ sub(r6, r6, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
// Setup argc and the builtin function in callee-saved registers.
@ -3613,8 +3789,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// saved values before returning a failure to C.
// Clear any pending exceptions.
__ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
__ ldr(r5, MemOperand(ip));
__ mov(r5, Operand(isolate->factory()->the_hole_value()));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r5, MemOperand(ip));
@ -3851,10 +4026,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
__ EnterInternalFrame();
__ Push(r0, r1);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
__ LeaveInternalFrame();
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(r0, r1);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
}
__ cmp(r0, Operand::Zero());
__ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
@ -4480,8 +4656,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
@ -4532,8 +4707,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
__ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
__ ldr(r1, MemOperand(r1, 0));
__ mov(r1, Operand(isolate->factory()->the_hole_value()));
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(r2, 0));
@ -4575,16 +4749,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ str(r2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
__ mov(r3, last_match_info_elements); // Moved up to reduce latency.
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
__ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
__ mov(r2, subject);
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastSubjectOffset,
r2,
r7,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
__ mov(r3, last_match_info_elements);
__ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastInputOffset,
subject,
r7,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@ -4712,6 +4895,22 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
void CallFunctionStub::FinishCode(Code* code) {
code->set_has_function_cache(false);
}
void CallFunctionStub::Clear(Heap* heap, Address address) {
UNREACHABLE();
}
Object* CallFunctionStub::GetCachedValue(Address address) {
UNREACHABLE();
return NULL;
}
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow, non_function;
@ -6425,12 +6624,13 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
__ EnterInternalFrame();
__ Push(r1, r0);
__ mov(ip, Operand(Smi::FromInt(op_)));
__ push(ip);
__ CallExternalReference(miss, 3);
__ LeaveInternalFrame();
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
__ mov(ip, Operand(Smi::FromInt(op_)));
__ push(ip);
__ CallExternalReference(miss, 3);
}
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@ -6613,6 +6813,8 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
// Registers:
// result: StringDictionary to probe
// r1: key
@ -6702,6 +6904,267 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
struct AheadOfTimeWriteBarrierStubList {
Register object, value, address;
RememberedSetAction action;
};
struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
{ r6, r4, r7, EMIT_REMEMBERED_SET },
{ r6, r2, r7, EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
{ r3, r4, r5, EMIT_REMEMBERED_SET },
// Used in CompileStoreGlobal.
{ r4, r1, r2, OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
{ r1, r2, r3, EMIT_REMEMBERED_SET },
{ r3, r2, r1, EMIT_REMEMBERED_SET },
// Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
{ r2, r1, r3, EMIT_REMEMBERED_SET },
{ r3, r1, r2, EMIT_REMEMBERED_SET },
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ r4, r2, r3, EMIT_REMEMBERED_SET },
// Null termination.
{ no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
};
bool RecordWriteStub::IsPregenerated() {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
if (object_.is(entry->object) &&
value_.is(entry->value) &&
address_.is(entry->address) &&
remembered_set_action_ == entry->action &&
save_fp_regs_mode_ == kDontSaveFPRegs) {
return true;
}
}
return false;
}
bool StoreBufferOverflowStub::IsPregenerated() {
return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
}
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
stub1.GetCode()->set_is_pregenerated(true);
}
void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
RecordWriteStub stub(entry->object,
entry->value,
entry->address,
entry->action,
kDontSaveFPRegs);
stub.GetCode()->set_is_pregenerated(true);
}
}
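Both loops above walk kAheadOfTime[] until they hit the no_reg sentinel rather than carrying an explicit length. A freestanding sketch of that sentinel-terminated iteration (the Register and no_reg stand-ins below are illustrative):

#include <cstdio>

struct Reg {
  int code;
  bool is(const Reg& other) const { return code == other.code; }
};
static const Reg no_reg = {-1};

struct Entry { Reg object, value, address; };

static const Entry kTable[] = {
  { {6}, {4}, {7} },
  { {3}, {4}, {5} },
  { no_reg, no_reg, no_reg },  // null termination, as in kAheadOfTime[]
};

int main() {
  // Stop at the sentinel instead of tracking the array length.
  for (const Entry* e = kTable; !e->object.is(no_reg); e++) {
    std::printf("stub for r%d/r%d/r%d\n",
                e->object.code, e->value.code, e->address.code);
  }
}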
// Takes the input in 3 registers: address_, value_ and object_. A pointer to
// the value has just been written into the object, and now this stub makes
// sure we keep the GC informed. The word in the object where the value has
// been written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
Label skip_to_incremental_noncompacting;
Label skip_to_incremental_compacting;
// The first two instructions are generated with labels so as to get the
// offset fixed up correctly by the bind(Label*) call. We patch them back and
// forth between a compare instruction (a nop in this position) and the
// real branch when we start and stop incremental heap marking.
// See RecordWriteStub::Patch for details.
__ b(&skip_to_incremental_noncompacting);
__ b(&skip_to_incremental_compacting);
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
__ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
}
__ Ret();
__ bind(&skip_to_incremental_noncompacting);
GenerateIncremental(masm, INCREMENTAL);
__ bind(&skip_to_incremental_compacting);
GenerateIncremental(masm, INCREMENTAL_COMPACTION);
// Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
// Will be checked in IncrementalMarking::ActivateGeneratedStub.
ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
PatchBranchIntoNop(masm, 0);
PatchBranchIntoNop(masm, Assembler::kInstrSize);
}
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.Save(masm);
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
__ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
__ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
regs_.scratch0(),
&dont_need_remembered_set);
__ CheckPageFlag(regs_.object(),
regs_.scratch0(),
1 << MemoryChunk::SCAN_ON_SCAVENGE,
ne,
&dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm, mode);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
__ bind(&dont_need_remembered_set);
}
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm, mode);
regs_.Restore(masm);
__ Ret();
}
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
Register address =
r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
ASSERT(!address.is(regs_.object()));
ASSERT(!address.is(r0));
__ Move(address, regs_.address());
__ Move(r0, regs_.object());
if (mode == INCREMENTAL_COMPACTION) {
__ Move(r1, address);
} else {
ASSERT(mode == INCREMENTAL);
__ ldr(r1, MemOperand(address, 0));
}
__ mov(r2, Operand(ExternalReference::isolate_address()));
AllowExternalCallThatCantCauseGC scope(masm);
if (mode == INCREMENTAL_COMPACTION) {
__ CallCFunction(
ExternalReference::incremental_evacuation_record_write_function(
masm->isolate()),
argument_count);
} else {
ASSERT(mode == INCREMENTAL);
__ CallCFunction(
ExternalReference::incremental_marking_record_write_function(
masm->isolate()),
argument_count);
}
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
Label on_black;
Label need_incremental;
Label need_incremental_pop_scratch;
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
__ bind(&on_black);
// Get the value from the slot.
__ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
__ CheckPageFlag(regs_.scratch0(), // Contains value.
regs_.scratch1(), // Scratch.
MemoryChunk::kEvacuationCandidateMask,
eq,
&ensure_not_white);
__ CheckPageFlag(regs_.object(),
regs_.scratch1(), // Scratch.
MemoryChunk::kSkipEvacuationSlotsRecordingMask,
eq,
&need_incremental);
__ bind(&ensure_not_white);
}
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.object(), regs_.address());
__ EnsureNotWhite(regs_.scratch0(), // The value.
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
regs_.address(), // Scratch.
&need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
__ bind(&need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
__ bind(&need_incremental);
// Fall through when we need to inform the incremental marker.
}
#undef __
} } // namespace v8::internal

245
deps/v8/src/arm/code-stubs-arm.h

@ -58,6 +58,25 @@ class TranscendentalCacheStub: public CodeStub {
};
class StoreBufferOverflowStub: public CodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
private:
SaveFPRegsMode save_doubles_;
Major MajorKey() { return StoreBufferOverflow; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@ -323,6 +342,9 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
the_heap_number_(the_heap_number),
scratch_(scratch) { }
bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
private:
Register the_int_;
Register the_heap_number_;
@ -371,6 +393,225 @@ class NumberToStringStub: public CodeStub {
};
class RecordWriteStub: public CodeStub {
public:
RecordWriteStub(Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
}
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
INCREMENTAL_COMPACTION
};
virtual bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
}
static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
}
static Mode GetMode(Code* stub) {
Instr first_instruction = Assembler::instr_at(stub->instruction_start());
Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
Assembler::kInstrSize);
if (Assembler::IsBranch(first_instruction)) {
return INCREMENTAL;
}
ASSERT(Assembler::IsTstImmediate(first_instruction));
if (Assembler::IsBranch(second_instruction)) {
return INCREMENTAL_COMPACTION;
}
ASSERT(Assembler::IsTstImmediate(second_instruction));
return STORE_BUFFER_ONLY;
}
static void Patch(Code* stub, Mode mode) {
MacroAssembler masm(NULL,
stub->instruction_start(),
stub->instruction_size());
switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
PatchBranchIntoNop(&masm, 0);
PatchBranchIntoNop(&masm, Assembler::kInstrSize);
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, 0);
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, Assembler::kInstrSize);
break;
}
ASSERT(GetMode(stub) == mode);
CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
}
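GetMode() recovers the stub's state purely from which of its first two instruction slots currently holds a real branch; Patch() flips the slots and flushes the icache so the change takes effect. A table-level sketch of that two-slot, three-state encoding, with booleans standing in for the branch-versus-tst test (illustrative only):

#include <cassert>

enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

// slot0/slot1: true if the corresponding instruction is still a branch.
Mode GetMode(bool slot0_is_branch, bool slot1_is_branch) {
  if (slot0_is_branch) return INCREMENTAL;             // first branch live
  if (slot1_is_branch) return INCREMENTAL_COMPACTION;  // second branch live
  return STORE_BUFFER_ONLY;                            // both patched to tst (nops here)
}

int main() {
  assert(GetMode(false, false) == STORE_BUFFER_ONLY);  // freshly generated stub
  assert(GetMode(true, false) == INCREMENTAL);
  assert(GetMode(false, true) == INCREMENTAL_COMPACTION);
}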
private:
// This is a helper class for freeing up 3 scratch registers. The input is
// two registers that must be preserved and one scratch register provided by
// the caller.
class RegisterAllocation {
public:
RegisterAllocation(Register object,
Register address,
Register scratch0)
: object_(object),
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
masm->push(scratch1_);
}
void Restore(MacroAssembler* masm) {
masm->pop(scratch1_);
}
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved. The scratch registers
// will be restored by other means so we don't bother pushing them here.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
masm->sub(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
// Save all VFP registers except d0.
for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
}
}
}
inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
// Restore all VFP registers except d0.
for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
}
masm->add(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
}
masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
}
inline Register object() { return object_; }
inline Register address() { return address_; }
inline Register scratch0() { return scratch0_; }
inline Register scratch1() { return scratch1_; }
private:
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
Register GetRegThatIsNotOneOf(Register r1,
Register r2,
Register r3) {
for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
if (candidate.is(r3)) continue;
return candidate;
}
UNREACHABLE();
return no_reg;
}
friend class RecordWriteStub;
};
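GetRegThatIsNotOneOf() simply scans the allocatable set for the first register that aliases none of the three inputs; that register becomes scratch1_ and is the only one Save()/Restore() push and pop. A plain-integer sketch of that selection (register codes here are made up):

#include <cstdio>

// Returns the first candidate code that differs from all three inputs,
// mirroring RegisterAllocation::GetRegThatIsNotOneOf.
int GetRegThatIsNotOneOf(int r1, int r2, int r3, int num_allocatable) {
  for (int code = 0; code < num_allocatable; code++) {
    if (code == r1 || code == r2 || code == r3) continue;
    return code;
  }
  return -1;  // unreachable whenever num_allocatable > 3
}

int main() {
  std::printf("%d\n", GetRegThatIsNotOneOf(0, 1, 2, 8));  // prints 3
}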
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
void Generate(MacroAssembler* masm);
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
Major MajorKey() { return RecordWrite; }
int MinorKey() {
return ObjectBits::encode(object_.code()) |
ValueBits::encode(value_.code()) |
AddressBits::encode(address_.code()) |
RememberedSetActionBits::encode(remembered_set_action_) |
SaveFPRegsModeBits::encode(save_fp_regs_mode_);
}
bool MustBeInStubCache() {
// All stubs must be registered in the stub cache,
// otherwise the incremental marker would not be able to find
// and patch them.
return true;
}
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
class ObjectBits: public BitField<int, 0, 4> {};
class ValueBits: public BitField<int, 4, 4> {};
class AddressBits: public BitField<int, 8, 4> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
Register object_;
Register value_;
Register address_;
RememberedSetAction remembered_set_action_;
SaveFPRegsMode save_fp_regs_mode_;
Label slow_;
RegisterAllocation regs_;
};
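MinorKey() packs the whole register/mode configuration into one small integer: 4 bits each for the three register codes, then 1 bit each for the remembered-set action and the FP mode. A worked sketch of that packing with plain shifts (field widths taken from the BitField declarations above; the sample values are arbitrary):

#include <cstdio>

// Same layout as ObjectBits/ValueBits/AddressBits/RememberedSetActionBits/
// SaveFPRegsModeBits: |fp:1|action:1|address:4|value:4|object:4|.
unsigned MinorKey(unsigned object_code, unsigned value_code,
                  unsigned address_code, unsigned action, unsigned fp_mode) {
  return (object_code & 0xF)
       | ((value_code & 0xF) << 4)
       | ((address_code & 0xF) << 8)
       | ((action & 0x1) << 12)
       | ((fp_mode & 0x1) << 13);
}

int main() {
  // e.g. object r6, value r4, address r7, action bit 0, fp bit 0:
  std::printf("0x%x\n", MinorKey(6, 4, 7, 0, 0));  // prints 0x746
}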
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
@ -575,6 +816,8 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@ -587,7 +830,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
Major MajorKey() { return StringDictionaryNegativeLookup; }
Major MajorKey() { return StringDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);

8
deps/v8/src/arm/codegen-arm.cc

@ -38,12 +38,16 @@ namespace internal {
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterInternalFrame();
masm->EnterFrame(StackFrame::INTERNAL);
ASSERT(!masm->has_frame());
masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveInternalFrame();
masm->LeaveFrame(StackFrame::INTERNAL);
ASSERT(masm->has_frame());
masm->set_has_frame(false);
}
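BeforeCall/AfterCall now do by hand what the FrameScope blocks elsewhere in this commit appear to do automatically: enter and leave the frame while keeping the assembler's has_frame() flag in sync. A simplified RAII model of that bookkeeping (Masm here is a stand-in, not the real MacroAssembler):

#include <cassert>

struct Masm {
  bool has_frame = false;
  void EnterFrame() { /* emit frame setup */ }
  void LeaveFrame() { /* emit frame teardown */ }
};

// Scope that mirrors the BeforeCall/AfterCall pairing: frame plus flag.
class ScopedFrame {
 public:
  explicit ScopedFrame(Masm* masm) : masm_(masm) {
    assert(!masm_->has_frame);
    masm_->EnterFrame();
    masm_->has_frame = true;
  }
  ~ScopedFrame() {
    assert(masm_->has_frame);
    masm_->LeaveFrame();
    masm_->has_frame = false;
  }
 private:
  Masm* masm_;
};

int main() {
  Masm masm;
  { ScopedFrame scope(&masm); assert(masm.has_frame); }
  assert(!masm.has_frame);
}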

10
deps/v8/src/arm/codegen-arm.h

@ -69,16 +69,6 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
// Constants related to patching of inlined load/store.
static int GetInlinedKeyedLoadInstructionsAfterPatch() {
return FLAG_debug_code ? 32 : 13;
}
static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
static int GetInlinedNamedStoreInstructionsAfterPatch() {
ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
return Isolate::Current()->inlined_write_barrier_size() + 4;
}
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};

82
deps/v8/src/arm/debug-arm.cc

@ -132,55 +132,57 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
__ EnterInternalFrame();
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
// are stored as a smi causing it to be untouched by GC.
ASSERT((object_regs & ~kJSCallerSaved) == 0);
ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
ASSERT((object_regs & non_object_regs) == 0);
if ((object_regs | non_object_regs) != 0) {
for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i);
Register reg = { r };
if ((non_object_regs & (1 << r)) != 0) {
if (FLAG_debug_code) {
__ tst(reg, Operand(0xc0000000));
__ Assert(eq, "Unable to encode value as smi");
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non-object values
// are stored as smis, causing them to be untouched by GC.
ASSERT((object_regs & ~kJSCallerSaved) == 0);
ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
ASSERT((object_regs & non_object_regs) == 0);
if ((object_regs | non_object_regs) != 0) {
for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i);
Register reg = { r };
if ((non_object_regs & (1 << r)) != 0) {
if (FLAG_debug_code) {
__ tst(reg, Operand(0xc0000000));
__ Assert(eq, "Unable to encode value as smi");
}
__ mov(reg, Operand(reg, LSL, kSmiTagSize));
}
__ mov(reg, Operand(reg, LSL, kSmiTagSize));
}
__ stm(db_w, sp, object_regs | non_object_regs);
}
__ stm(db_w, sp, object_regs | non_object_regs);
}
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(1);
__ CallStub(&ceb);
// Restore the register values from the expression stack.
if ((object_regs | non_object_regs) != 0) {
__ ldm(ia_w, sp, object_regs | non_object_regs);
for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i);
Register reg = { r };
if ((non_object_regs & (1 << r)) != 0) {
__ mov(reg, Operand(reg, LSR, kSmiTagSize));
}
if (FLAG_debug_code &&
(((object_regs |non_object_regs) & (1 << r)) == 0)) {
__ mov(reg, Operand(kDebugZapValue));
__ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(1);
__ CallStub(&ceb);
// Restore the register values from the expression stack.
if ((object_regs | non_object_regs) != 0) {
__ ldm(ia_w, sp, object_regs | non_object_regs);
for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i);
Register reg = { r };
if ((non_object_regs & (1 << r)) != 0) {
__ mov(reg, Operand(reg, LSR, kSmiTagSize));
}
if (FLAG_debug_code &&
(((object_regs |non_object_regs) & (1 << r)) == 0)) {
__ mov(reg, Operand(kDebugZapValue));
}
}
}
}
__ LeaveInternalFrame();
// Leave the internal frame.
}
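The debug-break helper can only park raw register words on the expression stack if the GC will read them as smis, which is why non-object registers are shifted left by the smi tag size and why the debug check insists the top two bits are clear first. A small sketch of that encode/decode round trip (a 32-bit layout with kSmiTagSize = 1 is assumed, as on ARM):

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;

// A raw word survives the smi round trip only if its top two bits are free:
// the same condition the helper asserts with tst reg, #0xc0000000.
bool FitsAsSmi(uint32_t raw) { return (raw & 0xc0000000u) == 0; }

uint32_t EncodeAsSmi(uint32_t raw) { return raw << kSmiTagSize; }   // LSL on store
uint32_t DecodeFromSmi(uint32_t smi) { return smi >> kSmiTagSize; } // LSR on restore

int main() {
  uint32_t raw = 0x12345678;  // arbitrary non-pointer register value
  assert(FitsAsSmi(raw));
  assert(DecodeFromSmi(EncodeAsSmi(raw)) == raw);
}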
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was

34
deps/v8/src/arm/deoptimizer-arm.cc

@ -112,12 +112,19 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
#endif
Isolate* isolate = code->GetIsolate();
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
DeoptimizerData* data = isolate->deoptimizer_data();
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
// We might be in the middle of incremental marking with compaction.
// Tell collector to treat this code object in a special way and
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@ -134,7 +141,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
@ -169,6 +177,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
reinterpret_cast<uint32_t>(check_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
RelocInfo rinfo(pc_after - 2 * kInstrSize,
RelocInfo::CODE_TARGET,
0,
unoptimized_code);
unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
unoptimized_code, &rinfo, replacement_code);
}
@ -193,6 +208,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
reinterpret_cast<uint32_t>(replacement_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(check_code->entry());
check_code->GetHeap()->incremental_marking()->
RecordCodeTargetPatch(pc_after - 2 * kInstrSize, check_code);
}
@ -632,7 +650,10 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(r5, Operand(ExternalReference::isolate_address()));
__ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
}
// Preserve "deoptimizer" object in register r0 and get the input
// frame descriptor pointer to r1 (deoptimizer->input_);
@ -686,8 +707,11 @@ void Deoptimizer::EntryGenerator::Generate() {
// r0: deoptimizer object; r1: scratch.
__ PrepareCallCFunction(1, r1);
// Call Deoptimizer::ComputeOutputFrames().
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate), 1);
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate), 1);
}
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.

10
deps/v8/src/arm/frames-arm.h

@ -70,6 +70,16 @@ static const RegList kCalleeSaved =
1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
// When calling into C++ (only for C++ calls that can't cause a GC).
// The call code will take care of lr, fp, etc.
static const RegList kCallerSaved =
1 << 0 | // r0
1 << 1 | // r1
1 << 2 | // r2
1 << 3 | // r3
1 << 9; // r9
static const int kNumCalleeSaved = 7 + kR9Available;
// Double registers d8 to d15 are callee-saved.

195
deps/v8/src/arm/full-codegen-arm.cc

@ -39,6 +39,7 @@
#include "stub-cache.h"
#include "arm/code-stubs-arm.h"
#include "arm/macro-assembler-arm.h"
namespace v8 {
namespace internal {
@ -155,6 +156,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok);
}
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
int locals_count = info->scope()->num_stack_slots();
__ Push(lr, fp, cp, r1);
@ -200,13 +206,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
__ mov(r1, Operand(Context::SlotOffset(var->index())));
__ str(r0, MemOperand(cp, r1));
// Update the write barrier. This clobbers all involved
// registers, so we have to use two more registers to avoid
// clobbering cp.
__ mov(r2, Operand(cp));
__ RecordWrite(r2, Operand(r1), r3, r0);
MemOperand target = ContextOperand(cp, var->index());
__ str(r0, target);
// Update the write barrier.
__ RecordWriteContextSlot(
cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
}
}
}
@ -665,12 +670,15 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
__ str(src, location);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
__ RecordWrite(scratch0,
Operand(Context::SlotOffset(var->index())),
scratch1,
src);
__ RecordWriteContextSlot(scratch0,
location.offset(),
src,
scratch1,
kLRHasBeenSaved,
kDontSaveFPRegs);
}
}
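SetVar only emits the barrier when the variable lives in a heap-allocated context: storing a pointer into such an object has to record the slot so that the store buffer and the incremental marker can see it. A schematic sketch of the decision the RecordWrite* helpers make (the structs and predicates below stand in for V8's heap machinery and are not its real API):

struct FakeValue { bool is_smi; bool in_new_space; };
struct FakeObject { bool in_new_space; };

static bool marking_is_active = false;

static void RecordSlotInRememberedSet(FakeObject*, int) { /* store-buffer entry */ }
static void NotifyIncrementalMarker(FakeObject*, const FakeValue&) { /* marking hook */ }

// After "object[offset] = value": keep the GC informed about the new pointer.
void WriteBarrier(FakeObject* object, int offset, const FakeValue& value,
                  bool omit_smi_check) {
  if (!omit_smi_check && value.is_smi) return;  // smis carry no pointer
  if (marking_is_active) NotifyIncrementalMarker(object, value);
  if (!object->in_new_space && value.in_new_space) {
    RecordSlotInRememberedSet(object, offset);  // old-to-new pointer: remember the slot
  }
}

int main() {
  FakeObject context = {false};       // an old-space context object
  FakeValue closure = {false, true};  // a freshly allocated heap object
  WriteBarrier(&context, 8, closure, false);
  return 0;
}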
@ -746,8 +754,14 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ str(result_register(), ContextOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ mov(r1, Operand(cp));
__ RecordWrite(r1, Operand(offset), r2, result_register());
__ RecordWriteContextSlot(cp,
offset,
result_register(),
r2,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
} else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration");
@ -1211,9 +1225,17 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
if (local->mode() == Variable::CONST) {
if (local->mode() == Variable::CONST ||
local->mode() == Variable::LET) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
if (local->mode() == Variable::CONST) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
} else { // Variable::LET
__ b(ne, done);
__ mov(r0, Operand(var->name()));
__ push(r0);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
}
}
__ jmp(done);
}
@ -1490,14 +1512,23 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
// Store the subexpression value in the array's elements.
__ ldr(r1, MemOperand(sp)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
__ ldr(r6, MemOperand(sp)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ str(result_register(), FieldMemOperand(r1, offset));
Label no_map_change;
__ JumpIfSmi(result_register(), &no_map_change);
// Update the write barrier for the array store with r0 as the scratch
// register.
__ RecordWrite(r1, Operand(offset), r2, result_register());
__ RecordWriteField(
r1, offset, result_register(), r2, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ CheckFastSmiOnlyElements(r3, r2, &no_map_change);
__ push(r6); // Copy of array literal.
__ CallRuntime(Runtime::kNonSmiElementStored, 1);
__ bind(&no_map_change);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@ -1869,7 +1900,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// RecordWrite may destroy all its register arguments.
__ mov(r3, result_register());
int offset = Context::SlotOffset(var->index());
__ RecordWrite(r1, Operand(offset), r2, r3);
__ RecordWriteContextSlot(
r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
}
}
@ -1887,7 +1919,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ str(r0, location);
if (var->IsContextSlot()) {
__ mov(r3, r0);
__ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3);
int offset = Context::SlotOffset(var->index());
__ RecordWriteContextSlot(
r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
}
} else {
ASSERT(var->IsLookupSlot());
@ -2662,20 +2696,24 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
// Assume that there are only two callable types, and one of them is at
// either end of the type range for JS object types. Saves extra comparisons.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
// Map is now in r0.
__ b(lt, &null);
// As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
// FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
// LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
__ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
__ b(ge, &function);
// Check if the constructor in the map is a function.
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
FIRST_SPEC_OBJECT_TYPE + 1);
__ b(eq, &function);
__ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
LAST_SPEC_OBJECT_TYPE - 1);
__ b(eq, &function);
// Assume that there is no larger type.
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
__ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
__ b(ne, &non_function_constructor);
@ -2853,7 +2891,9 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
__ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
__ mov(r2, r0);
__ RecordWriteField(
r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
__ bind(&done);
context()->Plug(r0);
@ -3141,16 +3181,31 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ str(scratch1, MemOperand(index2, 0));
__ str(scratch2, MemOperand(index1, 0));
Label new_space;
__ InNewSpace(elements, scratch1, eq, &new_space);
Label no_remembered_set;
__ CheckPageFlag(elements,
scratch1,
1 << MemoryChunk::SCAN_ON_SCAVENGE,
ne,
&no_remembered_set);
// Possible optimization: do a check that both values are Smis
// (OR them together and test against the Smi mask.)
__ mov(scratch1, elements);
__ RecordWriteHelper(elements, index1, scratch2);
__ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements.
// We are swapping two objects in an array and the incremental marker never
// pauses in the middle of scanning a single object. Therefore the
// incremental marker is not disturbed, so we don't need to call the
// RecordWrite stub that notifies the incremental marker.
__ RememberedSetHelper(elements,
index1,
scratch2,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ RememberedSetHelper(elements,
index2,
scratch2,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ bind(&new_space);
__ bind(&no_remembered_set);
// We are done. Drop elements from the stack, and return undefined.
__ Drop(3);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@ -3898,10 +3953,14 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Handle<String> check,
Label* if_true,
Label* if_false,
Label* fall_through) {
Handle<String> check) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
{ AccumulatorValueContext context(this);
VisitForTypeofValue(expr);
}
@ -3942,9 +4001,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
Split(ge, if_true, if_false, fall_through);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
__ b(eq, if_true);
__ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
Split(eq, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(r0, if_false);
if (!FLAG_harmony_typeof) {
@ -3963,18 +4024,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
}
void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
Label* if_true,
Label* if_false,
Label* fall_through) {
VisitForAccumulatorValue(expr);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@ -3982,9 +4032,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
if (TryLiteralCompare(expr)) return;
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@ -3992,13 +4045,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
@ -4085,8 +4131,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
Comment cmnt(masm_, "[ CompareToNull");
void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@ -4094,15 +4141,21 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(expr->expression());
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex;
__ LoadRoot(r1, nil_value);
__ cmp(r0, r1);
if (expr->is_strict()) {
if (expr->op() == Token::EQ_STRICT) {
Split(eq, if_true, if_false, fall_through);
} else {
Heap::RootListIndex other_nil_value = nil == kNullValue ?
Heap::kUndefinedValueRootIndex :
Heap::kNullValueRootIndex;
__ b(eq, if_true);
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ LoadRoot(r1, other_nil_value);
__ cmp(r0, r1);
__ b(eq, if_true);
__ JumpIfSmi(r0, if_false);

149
deps/v8/src/arm/ic-arm.cc

@ -208,7 +208,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value);
__ RecordWrite(elements, scratch2, scratch1);
__ RecordWrite(
elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
}
@ -504,21 +505,22 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack.
__ ldr(r3, MemOperand(sp, argc * kPointerSize));
__ EnterInternalFrame();
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the receiver and the name of the function.
__ Push(r3, r2);
// Push the receiver and the name of the function.
__ Push(r3, r2);
// Call the entry.
__ mov(r0, Operand(2));
__ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
// Call the entry.
__ mov(r0, Operand(2));
__ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
CEntryStub stub(1);
__ CallStub(&stub);
CEntryStub stub(1);
__ CallStub(&stub);
// Move result to r1 and leave the internal frame.
__ mov(r1, Operand(r0));
__ LeaveInternalFrame();
// Move result to r1 and leave the internal frame.
__ mov(r1, Operand(r0));
}
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
@ -650,12 +652,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
__ EnterInternalFrame();
__ push(r2); // save the key
__ Push(r1, r2); // pass the receiver and the key
__ CallRuntime(Runtime::kKeyedGetProperty, 2);
__ pop(r2); // restore the key
__ LeaveInternalFrame();
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(r2); // save the key
__ Push(r1, r2); // pass the receiver and the key
__ CallRuntime(Runtime::kKeyedGetProperty, 2);
__ pop(r2); // restore the key
}
__ mov(r1, r0);
__ jmp(&do_call);
@ -908,7 +911,8 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
__ str(r0, mapped_location);
__ add(r6, r3, r5);
__ RecordWrite(r3, r6, r9);
__ mov(r9, r0);
__ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in r3.
@ -916,7 +920,8 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
__ str(r0, unmapped_location);
__ add(r6, r3, r4);
__ RecordWrite(r3, r6, r9);
__ mov(r9, r0);
__ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
@ -1267,13 +1272,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
Label slow, fast, array, extra;
Label slow, array, extra, check_if_double_array;
Label fast_object_with_map_check, fast_object_without_map_check;
Label fast_double_with_map_check, fast_double_without_map_check;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
Register elements = r3; // Elements array of the receiver.
Register elements_map = r6;
Register receiver_map = r7;
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
@ -1281,35 +1290,26 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
__ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check if the object is a JS array or not.
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
__ cmp(r4, Operand(JS_ARRAY_TYPE));
__ b(eq, &array);
// Check that the object is some kind of JSObject.
__ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
__ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, &slow);
__ cmp(r4, Operand(JS_PROXY_TYPE));
__ b(eq, &slow);
__ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(eq, &slow);
// Object case: Check key against length in the elements array.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode and writable.
__ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
__ b(ne, &slow);
// Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
__ b(lo, &fast);
__ b(lo, &fast_object_with_map_check);
// Slow case, handle jump to runtime.
__ bind(&slow);
@ -1330,21 +1330,31 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
__ b(hs, &slow);
__ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ cmp(elements_map,
Operand(masm->isolate()->factory()->fixed_array_map()));
__ b(ne, &check_if_double_array);
// Calculate key + 1 as smi.
STATIC_ASSERT(kSmiTag == 0);
__ add(r4, key, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ b(&fast);
__ b(&fast_object_without_map_check);
__ bind(&check_if_double_array);
__ cmp(elements_map,
Operand(masm->isolate()->factory()->fixed_double_array_map()));
__ b(ne, &slow);
// Add 1 to key, and go to common element store code for doubles.
STATIC_ASSERT(kSmiTag == 0);
__ add(r4, key, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ jmp(&fast_double_without_map_check);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
// is, the length is always a smi.
__ bind(&array);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
__ b(ne, &slow);
// Check the key against the length in the array.
__ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
@ -1352,18 +1362,57 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ b(hs, &extra);
// Fall through to fast case.
__ bind(&fast);
// Fast case, store the value to the elements backing store.
__ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value, MemOperand(r5));
// Skip write barrier if the written value is a smi.
__ tst(value, Operand(kSmiTagMask));
__ Ret(eq);
__ bind(&fast_object_with_map_check);
Register scratch_value = r4;
Register address = r5;
__ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ cmp(elements_map,
Operand(masm->isolate()->factory()->fixed_array_map()));
__ b(ne, &fast_double_with_map_check);
__ bind(&fast_object_without_map_check);
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(value, &non_smi_value);
// It's irrelevant whether the array is smi-only or not when writing a smi.
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value, MemOperand(address));
__ Ret();
__ bind(&non_smi_value);
// Escape to the slow case when writing a non-smi into a smi-only array.
__ CheckFastObjectElements(receiver_map, scratch_value, &slow);
// Fast elements array, store the value to the elements backing store.
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value, MemOperand(address));
// Update write barrier for the elements array address.
__ sub(r4, r5, Operand(elements));
__ RecordWrite(elements, Operand(r4), r5, r6);
__ mov(scratch_value, value); // Preserve the value which is returned.
__ RecordWrite(elements,
address,
scratch_value,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Ret();
__ bind(&fast_double_with_map_check);
// Check for fast double array case. If this fails, call through to the
// runtime.
__ cmp(elements_map,
Operand(masm->isolate()->factory()->fixed_double_array_map()));
__ b(ne, &slow);
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
receiver,
elements,
r4,
r5,
r6,
r7,
&slow);
__ Ret();
}
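The rewritten generic keyed store now distinguishes four outcomes: smi values go straight into any fast fixed-array backing store, a non-smi written into a smi-only array bails out, fast-object arrays take the store plus write barrier, and fast-double arrays go through the StoreNumberToDoubleElements path. A compact sketch of that dispatch (the enum names are illustrative, not V8's):

#include <cstdio>

enum ElementsKind { FAST_SMI_ONLY, FAST_OBJECT, FAST_DOUBLE };
enum StoreOutcome { STORE_PLAIN, STORE_WITH_WRITE_BARRIER, STORE_AS_DOUBLE, GO_SLOW };

// Mirrors the branch structure of KeyedStoreIC::GenerateGeneric above.
StoreOutcome ClassifyKeyedStore(ElementsKind kind, bool value_is_smi) {
  if (value_is_smi && kind != FAST_DOUBLE) return STORE_PLAIN;  // smi-only or not, a smi is fine
  switch (kind) {
    case FAST_SMI_ONLY: return GO_SLOW;                   // non-smi into smi-only array
    case FAST_OBJECT:   return STORE_WITH_WRITE_BARRIER;  // pointer store, inform the GC
    case FAST_DOUBLE:   return STORE_AS_DOUBLE;           // double store handles smis and numbers
  }
  return GO_SLOW;
}

int main() {
  std::printf("%d\n", ClassifyKeyedStore(FAST_SMI_ONLY, false));  // prints 3 (GO_SLOW)
}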

38
deps/v8/src/arm/lithium-arm.cc

@ -212,10 +212,11 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
stream->Add(is_strict() ? " === null" : " == null");
stream->Add(kind() == kStrictEquality ? " === " : " == ");
stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@ -711,7 +712,9 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
instr->set_environment(CreateEnvironment(hydrogen_env));
int argument_index_accumulator = 0;
instr->set_environment(CreateEnvironment(hydrogen_env,
&argument_index_accumulator));
return instr;
}
@ -994,10 +997,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@ -1007,7 +1013,6 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
argument_count_,
value_count,
outer);
int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@ -1016,7 +1021,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
op = new LArgument(argument_index++);
op = new LArgument((*argument_index_accumulator)++);
} else {
op = UseAny(value);
}
@ -1444,9 +1449,9 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
return new LIsNilAndBranch(UseRegisterAtStart(instr->value()));
}
@ -1734,7 +1739,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new LLoadGlobalCell;
return instr->check_hole_value()
return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
@ -1748,14 +1753,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
if (instr->check_hole_value()) {
LOperand* temp = TempRegister();
LOperand* value = UseRegister(instr->value());
return AssignEnvironment(new LStoreGlobalCell(value, temp));
} else {
LOperand* value = UseRegisterAtStart(instr->value());
return new LStoreGlobalCell(value, NULL);
}
LOperand* temp = TempRegister();
LOperand* value = UseTempRegister(instr->value());
LInstruction* result = new LStoreGlobalCell(value, temp);
if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
return result;
}
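The argument_index_accumulator change above is easier to see outside of Lithium: the index handed to LArgument must be shared across the whole chain of outer environments instead of restarting at zero for each environment. A stand-alone sketch of the accumulator threading, with invented stand-in types for HEnvironment and CreateEnvironment:

#include <vector>

// Illustrative sketch only; HEnv and CollectArgumentSlots are stand-ins for
// HEnvironment / LChunkBuilder::CreateEnvironment.
struct HEnv {
  const HEnv* outer;
  int push_argument_count;  // how many PushArgument values this frame holds
};

static void CollectArgumentSlots(const HEnv* env,
                                 int* argument_index_accumulator,
                                 std::vector<int>* slots) {
  if (env == nullptr) return;
  // Outer environments are translated first, exactly as in the recursion above.
  CollectArgumentSlots(env->outer, argument_index_accumulator, slots);
  for (int i = 0; i < env->push_argument_count; ++i) {
    slots->push_back((*argument_index_accumulator)++);  // LArgument(index)
  }
}

int main() {
  HEnv outer = {nullptr, 2};
  HEnv inner = {&outer, 3};
  std::vector<int> slots;
  int accumulator = 0;
  CollectArgumentSlots(&inner, &accumulator, &slots);
  // slots == {0, 1, 2, 3, 4}; with a per-environment counter the inner and
  // outer frames would have reused the same indices.
  return 0;
}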

16
deps/v8/src/arm/lithium-arm.h

@ -107,7 +107,7 @@ class LCodeGen;
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNullAndBranch) \
V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@ -627,16 +627,17 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
class LIsNullAndBranch: public LControlInstruction<1, 0> {
class LIsNilAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsNullAndBranch(LOperand* value) {
explicit LIsNilAndBranch(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
bool is_strict() const { return hydrogen()->is_strict(); }
EqualityKind kind() const { return hydrogen()->kind(); }
NilValue nil() const { return hydrogen()->nil(); }
virtual void PrintDataTo(StringStream* stream);
};
@ -2159,7 +2160,8 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);

182
deps/v8/src/arm/lithium-codegen-arm.cc

@ -82,6 +82,14 @@ bool LCodeGen::GenerateCode() {
status_ = GENERATING;
CpuFeatures::Scope scope1(VFP3);
CpuFeatures::Scope scope2(ARMv7);
CodeStub::GenerateFPStubs();
// Open a frame scope to indicate that there is a frame on the stack. The
// NONE indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::NONE);
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@ -206,13 +214,11 @@ bool LCodeGen::GeneratePrologue() {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
__ mov(r1, Operand(Context::SlotOffset(var->index())));
__ str(r0, MemOperand(cp, r1));
// Update the write barrier. This clobbers all involved
// registers, so we have to use two more registers to avoid
// clobbering cp.
__ mov(r2, Operand(cp));
__ RecordWrite(r2, Operand(r1), r3, r0);
MemOperand target = ContextOperand(cp, var->index());
__ str(r0, target);
// Update the write barrier. This clobbers r3 and r0.
__ RecordWriteContextSlot(
cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
}
}
Comment(";;; End allocate local context");
@ -262,6 +268,9 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
__ jmp(code->exit());
}
@ -739,7 +748,7 @@ void LCodeGen::RecordSafepoint(
int deoptimization_index) {
ASSERT(expected_safepoint_kind_ == kind);
const ZoneList<LOperand*>* operands = pointers->operands();
const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
@ -1032,6 +1041,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
virtual void Generate() {
codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
}
virtual LInstruction* instr() { return instr_; }
private:
LDivI* instr_;
};
@ -1743,25 +1753,35 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
Register scratch = scratch0();
Register reg = ToRegister(instr->InputAt(0));
int false_block = chunk_->LookupDestination(instr->false_block_id());
// TODO(fsc): If the expression is known to be a smi, then it's
// definitely not null. Jump to the false block.
// If the expression is known to be untagged or a smi, then it's definitely
// not null, and it can't be an undetectable object.
if (instr->hydrogen()->representation().IsSpecialization() ||
instr->hydrogen()->type().IsSmi()) {
EmitGoto(false_block);
return;
}
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
__ LoadRoot(ip, Heap::kNullValueRootIndex);
Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex;
__ LoadRoot(ip, nil_value);
__ cmp(reg, ip);
if (instr->is_strict()) {
if (instr->kind() == kStrictEquality) {
EmitBranch(true_block, false_block, eq);
} else {
Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
Heap::kUndefinedValueRootIndex :
Heap::kNullValueRootIndex;
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ b(eq, true_label);
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ LoadRoot(ip, other_nil_value);
__ cmp(reg, ip);
__ b(eq, true_label);
__ JumpIfSmi(reg, false_label);
@ -1918,28 +1938,36 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
__ JumpIfSmi(input, is_false);
__ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, is_false);
// Map is now in temp.
// Functions have class 'Function'.
__ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
__ b(ge, is_true);
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
FIRST_SPEC_OBJECT_TYPE + 1);
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
LAST_SPEC_OBJECT_TYPE - 1);
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, is_false);
__ b(eq, is_true);
__ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
__ b(eq, is_true);
} else {
__ b(ge, is_false);
// Faster code path to avoid two compares: subtract lower bound from the
// actual type and do a signed compare with the width of the type range.
__ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
__ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
__ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ b(gt, is_false);
}
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
// As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
// FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
// LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
if (class_name->IsEqualTo(CStrVector("Object"))) {
@ -2016,9 +2044,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
virtual void Generate() {
codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
}
virtual LInstruction* instr() { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
@ -2180,7 +2207,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
__ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
if (instr->hydrogen()->check_hole_value()) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
DeoptimizeIf(eq, instr->environment());
@ -2203,6 +2230,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
Register scratch2 = ToRegister(instr->TempAt(0));
// Load the cell.
__ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
@ -2211,8 +2239,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted.
if (instr->hydrogen()->check_hole_value()) {
Register scratch2 = ToRegister(instr->TempAt(0));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch2,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@ -2222,6 +2249,15 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value.
__ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
// Cells are always in the remembered set.
__ RecordWriteField(scratch,
JSGlobalPropertyCell::kValueOffset,
value,
scratch2,
kLRHasBeenSaved,
kSaveFPRegs,
OMIT_REMEMBERED_SET);
}
@ -2247,10 +2283,15 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
__ str(value, ContextOperand(context, instr->slot_index()));
MemOperand target = ContextOperand(context, instr->slot_index());
__ str(value, target);
if (instr->needs_write_barrier()) {
int offset = Context::SlotOffset(instr->slot_index());
__ RecordWrite(context, Operand(offset), value, scratch0());
__ RecordWriteContextSlot(context,
target.offset(),
value,
scratch0(),
kLRHasBeenSaved,
kSaveFPRegs);
}
}
@ -2500,13 +2541,9 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}
if (instr->hydrogen()->RequiresHoleCheck()) {
// TODO(danno): If no hole check is required, there is no need to allocate
// elements into a temporary register, instead scratch can be used.
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
__ vldr(result, elements, 0);
}
@ -2577,6 +2614,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -2906,6 +2944,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
virtual LInstruction* instr() { return instr_; }
private:
LUnaryMathOperation* instr_;
};
@ -3202,7 +3241,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Drop(1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -3262,7 +3301,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ str(value, FieldMemOperand(object, offset));
if (instr->needs_write_barrier()) {
// Update the write barrier for the object for in-object properties.
__ RecordWrite(object, Operand(offset), value, scratch);
__ RecordWriteField(
object, offset, value, scratch, kLRHasBeenSaved, kSaveFPRegs);
}
} else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
@ -3270,7 +3310,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
__ RecordWrite(scratch, Operand(offset), value, object);
__ RecordWriteField(
scratch, offset, value, object, kLRHasBeenSaved, kSaveFPRegs);
}
}
}
@ -3301,6 +3342,13 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Register scratch = scratch0();
// This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
// conversion, so it deopts in that case.
if (instr->hydrogen()->ValueNeedsSmiCheck()) {
__ tst(value, Operand(kSmiTagMask));
DeoptimizeIf(ne, instr->environment());
}
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@ -3315,8 +3363,8 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Compute address of modified element and store it into key register.
__ add(key, scratch, Operand(FixedArray::kHeaderSize));
__ RecordWrite(elements, key, value);
__ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ RecordWrite(elements, key, value, kLRHasBeenSaved, kSaveFPRegs);
}
}
@ -3417,6 +3465,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3452,6 +3501,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@ -3575,6 +3625,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStringCharFromCode* instr_;
};
@ -3646,6 +3697,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
};
@ -3711,6 +3763,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagD* instr_;
};
@ -3819,16 +3872,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
class DeferredTaggedToI: public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
private:
LTaggedToI* instr_;
};
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(instr->InputAt(0));
Register scratch1 = scratch0();
@ -3911,6 +3954,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
class DeferredTaggedToI: public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LTaggedToI* instr_;
};
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@ -4343,10 +4396,12 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = ne;
} else if (type_name->Equals(heap()->function_symbol())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, input, scratch,
FIRST_CALLABLE_SPEC_OBJECT_TYPE);
final_branch_condition = ge;
__ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
__ b(eq, true_label);
__ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
final_branch_condition = eq;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
@ -4468,6 +4523,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};
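The DoIsNilAndBranch hunk earlier in this file encodes the JavaScript rules for comparing a value against null or undefined. A rough model of the branch logic in plain C++ (toy types, not engine code): strict equality matches only the requested nil value, while non-strict equality treats null, undefined, and undetectable objects alike.

enum NilValue { kNullValue, kUndefinedValue };
enum EqualityKind { kStrictEquality, kNonStrictEquality };

// Toy value descriptor; the real code works on tagged pointers and map bits.
struct Value {
  bool is_smi;
  bool is_null;
  bool is_undefined;
  bool is_undetectable;  // objects with the undetectable map bit set
};

bool IsNil(const Value& v, EqualityKind kind, NilValue nil) {
  if (v.is_smi) return false;  // smis are never null or undefined
  if (kind == kStrictEquality) {
    return nil == kNullValue ? v.is_null : v.is_undefined;
  }
  // == null and == undefined are equivalent: either nil value matches, and so
  // do undetectable objects.
  return v.is_null || v.is_undefined || v.is_undetectable;
}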

7
deps/v8/src/arm/lithium-codegen-arm.h

@ -376,16 +376,20 @@ class LCodeGen BASE_EMBEDDED {
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen), external_exit_(NULL) {
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@ -396,6 +400,7 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
int instruction_index_;
};
} } // namespace v8::internal

566
deps/v8/src/arm/macro-assembler-arm.cc

@ -42,7 +42,8 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
allow_stub_calls_(true) {
allow_stub_calls_(true),
has_frame_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
@ -406,32 +407,6 @@ void MacroAssembler::StoreRoot(Register source,
}
void MacroAssembler::RecordWriteHelper(Register object,
Register address,
Register scratch) {
if (emit_debug_code()) {
// Check that the object is not in new space.
Label not_in_new_space;
InNewSpace(object, scratch, ne, &not_in_new_space);
Abort("new-space object passed to RecordWriteHelper");
bind(&not_in_new_space);
}
// Calculate page address.
Bfc(object, 0, kPageSizeBits);
// Calculate region number.
Ubfx(address, address, Page::kRegionSizeLog2,
kPageSizeBits - Page::kRegionSizeLog2);
// Mark region dirty.
ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
orr(scratch, scratch, Operand(ip, LSL, address));
str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
@ -443,38 +418,52 @@ void MacroAssembler::InNewSpace(Register object,
}
// Will clobber 4 registers: object, offset, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
Operand offset,
Register scratch0,
Register scratch1) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
void MacroAssembler::RecordWriteField(
Register object,
int offset,
Register value,
Register dst,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
// First, test that the object is not in the new space. We cannot set
// region marks for new space pages.
InNewSpace(object, scratch0, eq, &done);
// Skip barrier if writing a smi.
if (smi_check == INLINE_SMI_CHECK) {
JumpIfSmi(value, &done);
}
// Add offset into the object.
add(scratch0, object, offset);
// Although the object register is tagged, the offset is relative to the start
// of the object, so the offset must be a multiple of kPointerSize.
ASSERT(IsAligned(offset, kPointerSize));
// Record the actual write.
RecordWriteHelper(object, scratch0, scratch1);
add(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
tst(dst, Operand((1 << kPointerSizeLog2) - 1));
b(eq, &ok);
stop("Unaligned cell in write barrier");
bind(&ok);
}
RecordWrite(object,
dst,
value,
lr_status,
save_fp,
remembered_set_action,
OMIT_SMI_CHECK);
bind(&done);
// Clobber all input registers when running with the debug-code flag
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
mov(object, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
}
}
@ -484,29 +473,94 @@ void MacroAssembler::RecordWrite(Register object,
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
Register address,
Register scratch) {
Register value,
LinkRegisterStatus lr_status,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
ASSERT(!address.is(cp) && !value.is(cp));
Label done;
// First, test that the object is not in the new space. We cannot set
// region marks for new space pages.
InNewSpace(object, scratch, eq, &done);
if (smi_check == INLINE_SMI_CHECK) {
ASSERT_EQ(0, kSmiTag);
tst(value, Operand(kSmiTagMask));
b(eq, &done);
}
CheckPageFlag(value,
value, // Used as scratch.
MemoryChunk::kPointersToHereAreInterestingMask,
eq,
&done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
eq,
&done);
// Record the actual write.
RecordWriteHelper(object, address, scratch);
if (lr_status == kLRHasNotBeenSaved) {
push(lr);
}
RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
CallStub(&stub);
if (lr_status == kLRHasNotBeenSaved) {
pop(lr);
}
bind(&done);
// Clobber all input registers when running with the debug-code flag
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
mov(object, Operand(BitCast<int32_t>(kZapValue)));
mov(address, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
}
}
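The rewritten RecordWrite above replaces the old region-marking helper with two page-flag tests ahead of the RecordWriteStub call. Roughly, and with invented bit positions and page layout, the fast-path predicate looks like this:

#include <cstdint>

// Toy model of the page-flag fast path; the flag names mirror MemoryChunk but
// the bit values and Page struct are illustrative assumptions.
const uint32_t kPointersToHereAreInterestingMask   = 1u << 0;
const uint32_t kPointersFromHereAreInterestingMask = 1u << 1;

struct Page { uint32_t flags; };

// The stub is only reached when both tests pass, i.e. the value's page can be
// pointed at "interestingly" and the object's page records outgoing pointers.
bool NeedsWriteBarrierSlowPath(const Page& object_page, const Page& value_page) {
  if ((value_page.flags & kPointersToHereAreInterestingMask) == 0) return false;
  if ((object_page.flags & kPointersFromHereAreInterestingMask) == 0) return false;
  return true;
}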
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address,
Register scratch,
SaveFPRegsMode fp_mode,
RememberedSetFinalAction and_then) {
Label done;
if (FLAG_debug_code) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok);
stop("Remembered set pointer is in new space");
bind(&ok);
}
// Load store buffer top.
ExternalReference store_buffer =
ExternalReference::store_buffer_top(isolate());
mov(ip, Operand(store_buffer));
ldr(scratch, MemOperand(ip));
// Store pointer to buffer and increment buffer top.
str(address, MemOperand(scratch, kPointerSize, PostIndex));
// Write back new top of buffer.
str(scratch, MemOperand(ip));
// Call stub on end of buffer.
// Check for end of buffer.
tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
if (and_then == kFallThroughAtEnd) {
b(eq, &done);
} else {
ASSERT(and_then == kReturnAtEnd);
Ret(ne);
}
push(lr);
StoreBufferOverflowStub store_buffer_overflow =
StoreBufferOverflowStub(fp_mode);
CallStub(&store_buffer_overflow);
pop(lr);
bind(&done);
if (and_then == kReturnAtEnd) {
Ret();
}
}
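RememberedSetHelper appends the slot address to the store buffer with a post-indexed store and only falls into the overflow stub when a bit of the bumped top pointer indicates the buffer is full. A simplified C-level model, where the overflow-bit value and buffer layout are assumptions for the example:

#include <cstdint>

const uintptr_t kStoreBufferOverflowBit = uintptr_t(1) << 16;  // illustrative value

struct StoreBufferTop {
  uintptr_t* top;  // next free slot in the store buffer
};

void RecordSlot(StoreBufferTop* buffer, uintptr_t slot_address,
                void (*overflow_handler)()) {
  *buffer->top++ = slot_address;  // str(address, MemOperand(scratch, kPointerSize, PostIndex))
  if (reinterpret_cast<uintptr_t>(buffer->top) & kStoreBufferOverflowBit) {
    overflow_handler();           // StoreBufferOverflowStub in the generated code
  }
}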
@ -961,6 +1015,9 @@ void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
@ -988,6 +1045,9 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
RelocInfo::Mode rmode,
InvokeFlag flag,
CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done;
InvokePrologue(expected, actual, code, no_reg, &done, flag,
@ -1011,6 +1071,9 @@ void MacroAssembler::InvokeFunction(Register fun,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1));
@ -1035,6 +1098,9 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
ASSERT(function->is_compiled());
// Get the function and setup the context.
@ -1090,10 +1156,10 @@ void MacroAssembler::IsObjectJSStringType(Register object,
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
ASSERT(allow_stub_calls());
mov(r0, Operand(0, RelocInfo::NONE));
mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif
@ -1793,13 +1859,127 @@ void MacroAssembler::CompareRoot(Register obj,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_ELEMENTS == 0);
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
b(ls, fail);
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
void MacroAssembler::CheckFastSmiOnlyElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
b(hi, fail);
}
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* fail) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
// Handle smi values specially.
JumpIfSmi(value_reg, &smi_value);
// Ensure that the object is a heap number
CheckMap(value_reg,
scratch1,
isolate()->factory()->heap_number_map(),
fail,
DONT_DO_SMI_CHECK);
// Check for NaN: all NaN values have a value greater (signed) than 0x7ff00000
// in the exponent.
mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
cmp(exponent_reg, scratch1);
b(ge, &maybe_nan);
ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
bind(&have_double_value);
add(scratch1, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
str(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
bind(&maybe_nan);
// Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
// it's an Infinity, and the non-NaN code path applies.
b(gt, &is_nan);
ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
cmp(mantissa_reg, Operand(0));
b(eq, &have_double_value);
bind(&is_nan);
// Load canonical NaN for storing into the double array.
uint64_t nan_int64 = BitCast<uint64_t>(
FixedDoubleArray::canonical_not_the_hole_nan_as_double());
mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
jmp(&have_double_value);
bind(&smi_value);
add(scratch1, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
add(scratch1, scratch1,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
// scratch1 now holds the effective address of the double element
FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(VFP3)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
Register untagged_value = receiver_reg;
SmiUntag(untagged_value, value_reg);
FloatingPointHelper::ConvertIntToDouble(this,
untagged_value,
destination,
d0,
mantissa_reg,
exponent_reg,
scratch4,
s2);
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
vstr(d0, scratch1, 0);
} else {
str(mantissa_reg, MemOperand(scratch1, 0));
str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
}
bind(&done);
}
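StoreNumberToDoubleElements accepts either a smi or a heap number and canonicalizes NaNs before storing, so that a separate NaN bit pattern can be reserved as the hole marker of the double array. A hedged sketch of the value handling only; the canonical pattern below is a placeholder, not necessarily the engine's constant:

#include <cmath>
#include <cstdint>
#include <cstring>

// Placeholder bit pattern standing in for
// FixedDoubleArray::canonical_not_the_hole_nan_as_double().
const uint64_t kCanonicalNanBits = 0x7FF8000000000000ULL;

double CanonicalizeForDoubleArray(double value) {
  if (std::isnan(value)) {
    double canonical;
    std::memcpy(&canonical, &kCanonicalNanBits, sizeof canonical);
    return canonical;  // every NaN collapses to one bit pattern
  }
  return value;  // smis are untagged and converted to double before this point
}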
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
@ -1895,13 +2075,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
}
MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
@ -1913,13 +2093,12 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
@ -2022,6 +2201,12 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
}
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
}
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
add(sp, sp, Operand(num_arguments * kPointerSize));
@ -2417,8 +2602,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate())));
CEntryStub stub(1);
stub.SaveDoubles();
CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
@ -2491,6 +2675,9 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
GetBuiltinEntry(r2, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(r2));
@ -2622,14 +2809,20 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
// Disable stub call restrictions to always allow calls to abort.
AllowStubCallsScope allow_scope(this, true);
mov(r0, Operand(p0));
push(r0);
mov(r0, Operand(Smi::FromInt(p1 - p0)));
push(r0);
CallRuntime(Runtime::kAbort, 2);
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
CallRuntime(Runtime::kAbort, 2);
} else {
CallRuntime(Runtime::kAbort, 2);
}
// will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
@ -2930,6 +3123,19 @@ void MacroAssembler::CopyBytes(Register src,
}
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
Register end_offset,
Register filler) {
Label loop, entry;
b(&entry);
bind(&loop);
str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
bind(&entry);
cmp(start_offset, end_offset);
b(lt, &loop);
}
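The new InitializeFieldsWithFiller is a post-indexed store loop; in C terms it amounts to:

#include <cstdint>

// C-level equivalent of the loop above: fill every pointer-size slot in
// [start, end) with |filler|; on exit |start| has advanced to |end|, matching
// the register behaviour documented in macro-assembler-arm.h.
void InitializeFieldsWithFiller(uintptr_t*& start, uintptr_t* end, uintptr_t filler) {
  while (start < end) {
    *start++ = filler;  // str(filler, MemOperand(start_offset, kPointerSize, PostIndex))
  }
}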
void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
Register source, // Input.
Register scratch) {
@ -3089,23 +3295,15 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(no_reg,
function,
ip,
num_reg_arguments,
num_double_arguments);
mov(ip, Operand(function));
CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}
void MacroAssembler::CallCFunction(Register function,
Register scratch,
int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(function,
ExternalReference::the_hole_value_location(isolate()),
scratch,
num_reg_arguments,
num_double_arguments);
int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
@ -3116,17 +3314,15 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function,
Register scratch,
int num_arguments) {
CallCFunction(function, scratch, num_arguments, 0);
CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunctionHelper(Register function,
ExternalReference function_reference,
Register scratch,
int num_reg_arguments,
int num_double_arguments) {
ASSERT(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
@ -3150,10 +3346,6 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
if (function.is(no_reg)) {
mov(scratch, Operand(function_reference));
function = scratch;
}
Call(function);
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
@ -3185,6 +3377,185 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
}
void MacroAssembler::CheckPageFlag(
Register object,
Register scratch,
int mask,
Condition cc,
Label* condition_met) {
and_(scratch, object, Operand(~Page::kPageAlignmentMask));
ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
tst(scratch, Operand(mask));
b(cc, condition_met);
}
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
}
void MacroAssembler::HasColor(Register object,
Register bitmap_scratch,
Register mask_scratch,
Label* has_color,
int first_bit,
int second_bit) {
ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
GetMarkBits(object, bitmap_scratch, mask_scratch);
Label other_color, word_boundary;
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
tst(ip, Operand(mask_scratch));
b(first_bit == 1 ? eq : ne, &other_color);
// Shift left 1 by adding.
add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
b(eq, &word_boundary);
tst(ip, Operand(mask_scratch));
b(second_bit == 1 ? ne : eq, has_color);
jmp(&other_color);
bind(&word_boundary);
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
tst(ip, Operand(1));
b(second_bit == 1 ? ne : eq, has_color);
bind(&other_color);
}
// Detect some, but not all, common pointer-free objects. This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(Register value,
Register scratch,
Label* not_data_object) {
Label is_data_object;
ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
b(eq, &is_data_object);
ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
b(ne, not_data_object);
bind(&is_data_object);
}
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
mov(ip, Operand(1));
mov(mask_reg, Operand(ip, LSL, mask_reg));
}
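GetMarkBits turns an object address into (bitmap cell, bit mask) coordinates: the page base is found by masking, the low address bits select the bit within a 32-bit cell, and the remaining in-page bits select the cell. A stand-alone version of the arithmetic; kPageSizeBits here is an assumption made for the example, while the pointer and cell sizes match the ARM port:

#include <cstdint>

const int kPointerSizeLog2 = 2;
const int kBitsPerCellLog2 = 5;   // 32 mark bits per bitmap cell
const int kPageSizeBits = 20;     // illustrative page size
const uintptr_t kPageAlignmentMask = (uintptr_t(1) << kPageSizeBits) - 1;

struct MarkBitLocation {
  uintptr_t page_base;   // the bitmap lives at a fixed offset from here
  uintptr_t cell_index;  // which 32-bit cell within the page's bitmap
  uint32_t mask;         // single bit selecting this word's mark bit
};

MarkBitLocation GetMarkBits(uintptr_t addr) {
  MarkBitLocation loc;
  loc.page_base = addr & ~kPageAlignmentMask;
  loc.mask = 1u << ((addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1));
  loc.cell_index =
      (addr & kPageAlignmentMask) >> (kPointerSizeLog2 + kBitsPerCellLog2);
  return loc;
}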
void MacroAssembler::EnsureNotWhite(
Register value,
Register bitmap_scratch,
Register mask_scratch,
Register load_scratch,
Label* value_is_white_and_not_data) {
ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
Label done;
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
tst(mask_scratch, load_scratch);
b(ne, &done);
if (FLAG_debug_code) {
// Check for impossible bit pattern.
Label ok;
// LSL may overflow, making the check conservative.
tst(load_scratch, Operand(mask_scratch, LSL, 1));
b(eq, &ok);
stop("Impossible marking bit pattern");
bind(&ok);
}
// Value is white. We check whether it is data that doesn't need scanning.
// Currently only checks for HeapNumber and non-cons strings.
Register map = load_scratch; // Holds map while checking type.
Register length = load_scratch; // Holds length of object after testing type.
Label is_data_object;
// Check for heap-number
ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
CompareRoot(map, Heap::kHeapNumberMapRootIndex);
mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
b(eq, &is_data_object);
// Check for strings.
ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
Register instance_type = load_scratch;
ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
b(ne, value_is_white_and_not_data);
// It's a non-indirect (non-cons and non-slice) string.
// If it's external, the length is just ExternalString::kSize.
// Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
// External strings are the only ones with the kExternalStringTag bit
// set.
ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
tst(instance_type, Operand(kExternalStringTag));
mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
b(ne, &is_data_object);
// Sequential string, either ASCII or UC16.
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
ldr(ip, FieldMemOperand(value, String::kLengthOffset));
tst(instance_type, Operand(kStringEncodingMask));
mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
and_(length, length, Operand(~kObjectAlignmentMask));
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
orr(ip, ip, Operand(mask_scratch));
str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
add(ip, ip, Operand(length));
str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
bind(&done);
}
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
Usat(output_reg, 8, Operand(input_reg));
}
@ -3234,6 +3605,17 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
}
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
if (r1.is(r4)) return true;
if (r2.is(r3)) return true;
if (r2.is(r4)) return true;
if (r3.is(r4)) return true;
return false;
}
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),

226
deps/v8/src/arm/macro-assembler-arm.h

@ -29,6 +29,7 @@
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "assembler.h"
#include "frames.h"
#include "v8globals.h"
namespace v8 {
@ -79,6 +80,14 @@ enum ObjectToDoubleFlags {
};
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@ -157,40 +166,126 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond = al);
// ---------------------------------------------------------------------------
// GC Support
void IncrementalMarkingRecordWriteHelper(Register object,
Register value,
Register address);
enum RememberedSetFinalAction {
kReturnAtEnd,
kFallThroughAtEnd
};
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr,
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
void CheckPageFlag(Register object,
Register scratch,
int mask,
Condition cc,
Label* condition_met);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
Register scratch,
Label* branch) {
InNewSpace(object, scratch, ne, branch);
}
// Check if object is in new space.
// scratch can be object itself, but it will be clobbered.
void InNewSpace(Register object,
Register scratch,
Condition cond, // eq for new space, ne otherwise
Label* branch);
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfInNewSpace(Register object,
Register scratch,
Label* branch) {
InNewSpace(object, scratch, eq, branch);
}
// For the page containing |object| mark the region covering [address]
// dirty. The object address must be in the first 8K of an allocated page.
void RecordWriteHelper(Register object,
Register address,
Register scratch);
// Check if an object has a given incremental marking color.
void HasColor(Register object,
Register scratch0,
Register scratch1,
Label* has_color,
int first_bit,
int second_bit);
// For the page containing |object| mark the region covering
// [object+offset] dirty. The object address must be in the first 8K
// of an allocated page. The 'scratch' registers are used in the
// implementation and all 3 registers are clobbered by the
// operation, as well as the ip register. RecordWrite updates the
// write barrier even when storing smis.
void RecordWrite(Register object,
Operand offset,
void JumpIfBlack(Register object,
Register scratch0,
Register scratch1);
Register scratch1,
Label* on_black);
// Checks the color of an object. If the object is already grey or black
// then we just fall through, since it is already live. If it is white and
// we can determine that it doesn't need to be scanned, then we just mark it
// black and fall through. For the rest we jump to the label so the
// incremental marker can fix its assumptions.
void EnsureNotWhite(Register object,
Register scratch1,
Register scratch2,
Register scratch3,
Label* object_is_white_and_not_data);
// For the page containing |object| mark the region covering
// [address] dirty. The object address must be in the first 8K of an
// allocated page. All 3 registers are clobbered by the operation,
// as well as the ip register. RecordWrite updates the write barrier
// even when storing smis.
void RecordWrite(Register object,
Register address,
Register scratch);
// Detects conservatively whether an object is data-only, i.e. it does not
// need to be scanned by the garbage collector.
void JumpIfDataObject(Register value,
Register scratch,
Label* not_data_object);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
void RecordWriteField(
Register object,
int offset,
Register value,
Register scratch,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
inline void RecordWriteContextSlot(
Register context,
int offset,
Register value,
Register scratch,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK) {
RecordWriteField(context,
offset + kHeapObjectTag,
value,
scratch,
lr_status,
save_fp,
remembered_set_action,
smi_check);
}
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
void RecordWrite(
Register object,
Register address,
Register value,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
// Push a handle.
void Push(Handle<Object> handle);
@ -318,16 +413,6 @@ class MacroAssembler: public Assembler {
const double imm,
const Condition cond = al);
// ---------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0);
@ -569,6 +654,13 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
// Initialize fields with filler values. Fields starting at |start_offset| up
// to but not including |end_offset| are overwritten with the value in
// |filler|. At the end of the loop, |start_offset| takes the value of
// |end_offset|.
void InitializeFieldsWithFiller(Register start_offset,
Register end_offset,
Register filler);
// ---------------------------------------------------------------------------
// Support functions.
@ -608,6 +700,31 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,
Register scratch,
Label* fail);
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiOnlyElements(Register map,
Register scratch,
Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements, otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* fail);
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
@ -830,11 +947,11 @@ class MacroAssembler: public Assembler {
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, Register scratch, int num_arguments);
void CallCFunction(Register function, int num_arguments);
void CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments);
void CallCFunction(Register function, Register scratch,
void CallCFunction(Register function,
int num_reg_arguments,
int num_double_arguments);
@ -902,6 +1019,9 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
@ -1048,10 +1168,12 @@ class MacroAssembler: public Assembler {
void LoadInstanceDescriptors(Register map, Register descriptors);
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
private:
void CallCFunctionHelper(Register function,
ExternalReference function_reference,
Register scratch,
int num_reg_arguments,
int num_double_arguments);
@ -1067,16 +1189,25 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
void InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Register scratch,
Condition cond, // eq for new space, ne otherwise.
Label* branch);
// Helper for finding the mark bits for an address. Afterwards, the
// bitmap register points at the word with the mark bits and the mask
// the position of the first bit. Leaves addr_reg unchanged.
inline void GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg);
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
@ -1084,6 +1215,7 @@ class MacroAssembler: public Assembler {
bool generating_stub_;
bool allow_stub_calls_;
bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;

15
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -371,9 +371,12 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Isolate.
__ mov(r3, Operand(ExternalReference::isolate_address()));
ExternalReference function =
ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
__ CallCFunction(function, argument_count);
{
AllowExternalCallThatCantCauseGC scope(masm_);
ExternalReference function =
ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
__ CallCFunction(function, argument_count);
}
// Check if function returned non-zero for success or zero for failure.
__ cmp(r0, Operand(0, RelocInfo::NONE));
@ -611,6 +614,12 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Entry code:
__ bind(&entry_label_);
// Tell the system that we have a stack frame. Because the type is MANUAL, no
// code is generated.
FrameScope scope(masm_, StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
// Push arguments
// Save callee-save registers.
// Start new stack frame.

2
deps/v8/src/arm/simulator-arm.cc

@ -1618,6 +1618,8 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
// Catch null pointers a little earlier.
ASSERT(start_address > 8191 || start_address < 0);
int reg = 0;
while (rlist != 0) {
if ((rlist & 1) != 0) {

324
deps/v8/src/arm/stub-cache-arm.cc

@ -431,7 +431,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
__ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
__ mov(name_reg, r0);
__ RecordWriteField(receiver_reg,
offset,
name_reg,
scratch,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@ -444,7 +450,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
__ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
__ mov(name_reg, r0);
__ RecordWriteField(scratch,
offset,
name_reg,
receiver_reg,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
// Return the value (register r0).
@ -553,9 +565,10 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
}
static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
int argc) {
static MaybeObject* GenerateFastApiDirectCall(
MacroAssembler* masm,
const CallOptimization& optimization,
int argc) {
// ----------- S t a t e -------------
// -- sp[0] : holder (set by CheckPrototypes)
// -- sp[4] : callee js function
@ -591,6 +604,8 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
ApiFunction fun(api_function_address);
const int kApiStackSpace = 4;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// r0 = v8::Arguments&
@ -616,9 +631,11 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
ExternalReference ref = ExternalReference(&fun,
ExternalReference::DIRECT_API_CALL,
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@ -794,7 +811,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
miss_label);
// Call a runtime function to load the interceptor property.
__ EnterInternalFrame();
FrameScope scope(masm, StackFrame::INTERNAL);
// Save the name_ register across the call.
__ push(name_);
@ -811,7 +828,8 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Restore the name_ register.
__ pop(name_);
__ LeaveInternalFrame();
// Leave the internal frame.
}
void LoadWithInterceptor(MacroAssembler* masm,
@ -820,18 +838,19 @@ class CallInterceptorCompiler BASE_EMBEDDED {
JSObject* holder_obj,
Register scratch,
Label* interceptor_succeeded) {
__ EnterInternalFrame();
__ Push(holder, name_);
CompileCallLoadPropertyWithInterceptor(masm,
receiver,
holder,
name_,
holder_obj);
__ pop(name_); // Restore the name.
__ pop(receiver); // Restore the holder.
__ LeaveInternalFrame();
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(holder, name_);
CompileCallLoadPropertyWithInterceptor(masm,
receiver,
holder,
name_,
holder_obj);
__ pop(name_); // Restore the name.
__ pop(receiver); // Restore the holder.
}
// If interceptor returns no-result sentinel, call the constant function.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
@ -1228,7 +1247,10 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
ApiFunction fun(getter_address);
const int kApiStackSpace = 1;
FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object **args_) as the data.
__ str(scratch2, MemOperand(sp, 1 * kPointerSize));
@ -1288,41 +1310,43 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
__ EnterInternalFrame();
{
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
// CALLBACKS case needs a receiver to be passed into C++ callback.
__ Push(receiver, holder_reg, name_reg);
} else {
__ Push(holder_reg, name_reg);
}
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
// CALLBACKS case needs a receiver to be passed into C++ callback.
__ Push(receiver, holder_reg, name_reg);
} else {
__ Push(holder_reg, name_reg);
}
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
CompileCallLoadPropertyWithInterceptor(masm(),
receiver,
holder_reg,
name_reg,
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
__ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
__ cmp(r0, scratch1);
__ b(eq, &interceptor_failed);
__ LeaveInternalFrame();
__ Ret();
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
CompileCallLoadPropertyWithInterceptor(masm(),
receiver,
holder_reg,
name_reg,
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
__ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
__ cmp(r0, scratch1);
__ b(eq, &interceptor_failed);
frame_scope.GenerateLeaveFrame();
__ Ret();
__ bind(&interceptor_failed);
__ pop(name_reg);
__ pop(holder_reg);
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
__ pop(receiver);
}
__ bind(&interceptor_failed);
__ pop(name_reg);
__ pop(holder_reg);
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
__ pop(receiver);
}
__ LeaveInternalFrame();
// Leave the internal frame.
}
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register.
@ -1556,7 +1580,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
DONT_DO_SMI_CHECK);
if (argc == 1) { // Otherwise fall through to call the builtin.
Label exit, with_write_barrier, attempt_to_grow_elements;
Label attempt_to_grow_elements;
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@ -1571,11 +1595,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ cmp(r0, r4);
__ b(gt, &attempt_to_grow_elements);
// Check if value is a smi.
Label with_write_barrier;
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(r4, &with_write_barrier);
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Push the element.
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
@ -1585,14 +1613,31 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
// Check for a smi.
__ JumpIfNotSmi(r4, &with_write_barrier);
__ bind(&exit);
__ Drop(argc + 1);
__ Ret();
__ bind(&with_write_barrier);
__ InNewSpace(elements, r4, eq, &exit);
__ RecordWriteHelper(elements, end_elements, r4);
__ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ CheckFastSmiOnlyElements(r6, r6, &call_builtin);
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Push the element.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
__ RecordWrite(elements,
end_elements,
r4,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Drop(argc + 1);
__ Ret();
@ -1604,6 +1649,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ b(&call_builtin);
}
__ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
// Growing elements that are SMI-only requires special handling in case
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(r2, &no_fast_elements_check);
__ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ CheckFastObjectElements(r7, r7, &call_builtin);
__ bind(&no_fast_elements_check);
Isolate* isolate = masm()->isolate();
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate);
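A hedged JavaScript illustration of the case the comment above describes (elements kinds are internal representation details and not directly observable from script):

  var a = [1, 2, 3];   // starts out with smi-only elements internally
  a.push(4);           // another small integer: stays on the fast stub path
  a.push("x");         // non-smi value: the stub bails out to the builtin,
                       // which transitions the backing store before storing "x"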
@ -1630,8 +1684,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Update new_space_allocation_top.
__ str(r6, MemOperand(r7));
// Push the argument.
__ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize));
__ str(r6, MemOperand(end_elements));
__ str(r2, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
@ -2713,6 +2766,15 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Store the value in the cell.
__ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
__ mov(r1, r0);
__ RecordWriteField(r4,
JSGlobalPropertyCell::kValueOffset,
r1,
r2,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET);
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
__ Ret();
@ -3454,6 +3516,7 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -3540,6 +3603,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -3880,6 +3944,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -3943,6 +4008,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -4082,6 +4148,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -4234,8 +4301,10 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
}
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array) {
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
ElementsKind elements_kind) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@ -4277,15 +4346,33 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
__ cmp(key_reg, scratch);
__ b(hs, &miss_force_generic);
__ add(scratch,
elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ str(value_reg,
MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ RecordWrite(scratch,
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
receiver_reg , elements_reg);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ JumpIfNotSmi(value_reg, &miss_force_generic);
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ add(scratch,
scratch,
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value_reg, MemOperand(scratch));
} else {
ASSERT(elements_kind == FAST_ELEMENTS);
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ add(scratch,
scratch,
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value_reg, MemOperand(scratch));
__ mov(receiver_reg, value_reg);
__ RecordWrite(elements_reg, // Object.
scratch, // Address.
receiver_reg, // Value.
kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
// value_reg (r0) is preserved.
// Done.
__ Ret();
@ -4309,15 +4396,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- r4 : scratch
// -- r5 : scratch
// -----------------------------------
Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
Label miss_force_generic;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
Register scratch = r3;
Register elements_reg = r4;
Register mantissa_reg = r5;
Register exponent_reg = r6;
Register elements_reg = r3;
Register scratch1 = r4;
Register scratch2 = r5;
Register scratch3 = r6;
Register scratch4 = r7;
// This stub is meant to be tail-jumped to, the receiver must already
@ -4329,90 +4416,25 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Check that the key is within bounds.
if (is_js_array) {
__ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
__ ldr(scratch,
__ ldr(scratch1,
FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
__ cmp(key_reg, scratch);
__ cmp(key_reg, scratch1);
__ b(hs, &miss_force_generic);
// Handle smi values specially.
__ JumpIfSmi(value_reg, &smi_value);
// Ensure that the object is a heap number
__ CheckMap(value_reg,
scratch,
masm->isolate()->factory()->heap_number_map(),
&miss_force_generic,
DONT_DO_SMI_CHECK);
// Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
// in the exponent.
__ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
__ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
__ cmp(exponent_reg, scratch);
__ b(ge, &maybe_nan);
__ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
__ bind(&have_double_value);
__ add(scratch, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
__ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
__ str(exponent_reg, FieldMemOperand(scratch, offset));
__ Ret();
__ bind(&maybe_nan);
// Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
// it's an Infinity, and the non-NaN code path applies.
__ b(gt, &is_nan);
__ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
__ cmp(mantissa_reg, Operand(0));
__ b(eq, &have_double_value);
__ bind(&is_nan);
// Load canonical NaN for storing into the double array.
uint64_t nan_int64 = BitCast<uint64_t>(
FixedDoubleArray::canonical_not_the_hole_nan_as_double());
__ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
__ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
__ jmp(&have_double_value);
__ bind(&smi_value);
__ add(scratch, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ add(scratch, scratch,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
// scratch is now effective address of the double element
FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(VFP3)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
Register untagged_value = receiver_reg;
__ SmiUntag(untagged_value, value_reg);
FloatingPointHelper::ConvertIntToDouble(
masm,
untagged_value,
destination,
d0,
mantissa_reg,
exponent_reg,
scratch4,
s2);
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
__ vstr(d0, scratch, 0);
} else {
__ str(mantissa_reg, MemOperand(scratch, 0));
__ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
}
__ StoreNumberToDoubleElements(value_reg,
key_reg,
receiver_reg,
elements_reg,
scratch1,
scratch2,
scratch3,
scratch4,
&miss_force_generic);
__ Ret();
// Handle store cache miss, replacing the ic with the generic stub.

151
deps/v8/src/array.js

@ -201,17 +201,14 @@ function ConvertToString(x) {
function ConvertToLocaleString(e) {
if (e == null) {
if (IS_NULL_OR_UNDEFINED(e)) {
return '';
} else {
// e_obj's toLocaleString might be overwritten, check if it is a function.
// Call ToString if toLocaleString is not a function.
// See issue 877615.
// According to ES5, section 15.4.4.3, the toLocaleString conversion
// must throw a TypeError if ToObject(e).toLocaleString isn't
// callable.
var e_obj = ToObject(e);
if (IS_SPEC_FUNCTION(e_obj.toLocaleString))
return ToString(e_obj.toLocaleString());
else
return ToString(e);
return %ToString(e_obj.toLocaleString());
}
}
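The observable effect of the comment above, sketched in JavaScript (the values are illustrative, and the list separator is implementation-defined; V8 uses a comma):

  var x = [null, undefined];
  x.toLocaleString();            // "," because null and undefined elements
                                 // convert to empty strings

  var y = [{ toLocaleString: 42 }];
  y.toLocaleString();            // throws TypeError: per ES5 15.4.4.3 the
                                 // element's toLocaleString must be callable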
@ -381,18 +378,31 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
function ArrayToString() {
if (!IS_ARRAY(this)) {
throw new $TypeError('Array.prototype.toString is not generic');
var array;
var func;
if (IS_ARRAY(this)) {
func = this.join;
if (func === ArrayJoin) {
return Join(this, this.length, ',', ConvertToString);
}
array = this;
} else {
array = ToObject(this);
func = array.join;
}
return Join(this, this.length, ',', ConvertToString);
if (!IS_SPEC_FUNCTION(func)) {
return %_CallFunction(array, ObjectToString);
}
return %_CallFunction(array, func);
}
function ArrayToLocaleString() {
if (!IS_ARRAY(this)) {
throw new $TypeError('Array.prototype.toString is not generic');
}
return Join(this, this.length, ',', ConvertToLocaleString);
var array = ToObject(this);
var arrayLen = array.length;
var len = TO_UINT32(arrayLen);
if (len === 0) return "";
return Join(array, len, ',', ConvertToLocaleString);
}
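A short sketch of the new generic behaviour (the object literals are invented examples): toString dispatches to a callable join and otherwise falls back to Object.prototype.toString, and toLocaleString now accepts array-like receivers.

  Array.prototype.toString.call({ join: function() { return "joined"; } });
  // "joined": this.join is callable, so it is used

  Array.prototype.toString.call({ length: 0 });
  // "[object Object]": no callable join, falls back to ObjectToString

  Array.prototype.toLocaleString.call({ length: 2, 0: 1, 1: 2 });
  // "1,2": the length is read via ToObject(this), so array-likes work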
@ -993,21 +1003,24 @@ function ArrayFilter(f, receiver) {
["Array.prototype.filter"]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = ToUint32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = ToUint32(this.length);
var result = [];
var result_length = 0;
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
if (%_CallFunction(receiver, current, i, this, f)) {
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
if (%_CallFunction(receiver, current, i, array, f)) {
result[result_length++] = current;
}
}
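The reordered prologue above reads ToObject(this) and its length before validating the callback, so side effects of a length accessor become visible first. A minimal illustration (the probe object is invented):

  var probe = {
    get length() { throw new Error("length read before callback check"); }
  };
  var seen;
  try {
    Array.prototype.filter.call(probe, "not a function");
  } catch (e) {
    seen = e.message;  // "length read before callback check", not a TypeError:
                       // the non-callable argument is only checked afterwards
  }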
@ -1022,19 +1035,22 @@ function ArrayForEach(f, receiver) {
["Array.prototype.forEach"]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
%_CallFunction(receiver, current, i, this, f);
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
%_CallFunction(receiver, current, i, array, f);
}
}
}
@ -1048,19 +1064,22 @@ function ArraySome(f, receiver) {
["Array.prototype.some"]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
if (%_CallFunction(receiver, current, i, this, f)) return true;
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
if (%_CallFunction(receiver, current, i, array, f)) return true;
}
}
return false;
@ -1073,19 +1092,22 @@ function ArrayEvery(f, receiver) {
["Array.prototype.every"]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
if (!%_CallFunction(receiver, current, i, this, f)) return false;
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
if (!%_CallFunction(receiver, current, i, array, f)) return false;
}
}
return true;
@ -1097,21 +1119,24 @@ function ArrayMap(f, receiver) {
["Array.prototype.map"]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = TO_UINT32(this.length);
var result = new $Array();
var accumulator = new InternalArray(length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
accumulator[i] = %_CallFunction(receiver, current, i, this, f);
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
accumulator[i] = %_CallFunction(receiver, current, i, array, f);
}
}
%MoveArrayContents(accumulator, result);
@ -1245,19 +1270,20 @@ function ArrayReduce(callback, current) {
["Array.prototype.reduce"]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = ToUint32(array.length);
if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = ToUint32(this.length);
var i = 0;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i < length; i++) {
current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
i++;
break find_initial;
}
@ -1267,9 +1293,9 @@ function ArrayReduce(callback, current) {
var receiver = %GetDefaultReceiver(callback);
for (; i < length; i++) {
var element = this[i];
if (!IS_UNDEFINED(element) || i in this) {
current = %_CallFunction(receiver, current, element, i, this, callback);
var element = array[i];
if (!IS_UNDEFINED(element) || i in array) {
current = %_CallFunction(receiver, current, element, i, array, callback);
}
}
return current;
@ -1281,15 +1307,20 @@ function ArrayReduceRight(callback, current) {
["Array.prototype.reduceRight"]);
}
// Pull out the length so that side effects are visible before the
// callback function is checked.
var array = ToObject(this);
var length = ToUint32(array.length);
if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
var i = ToUint32(this.length) - 1;
var i = length - 1;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i >= 0; i--) {
current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
i--;
break find_initial;
}
@ -1299,9 +1330,9 @@ function ArrayReduceRight(callback, current) {
var receiver = %GetDefaultReceiver(callback);
for (; i >= 0; i--) {
var element = this[i];
if (!IS_UNDEFINED(element) || i in this) {
current = %_CallFunction(receiver, current, element, i, this, callback);
var element = array[i];
if (!IS_UNDEFINED(element) || i in array) {
current = %_CallFunction(receiver, current, element, i, array, callback);
}
}
return current;

56
deps/v8/src/assembler.cc

@ -38,6 +38,7 @@
#include "deoptimizer.h"
#include "execution.h"
#include "ic-inl.h"
#include "incremental-marking.h"
#include "factory.h"
#include "runtime.h"
#include "runtime-profiler.h"
@ -47,6 +48,7 @@
#include "ast.h"
#include "regexp-macro-assembler.h"
#include "platform.h"
#include "store-buffer.h"
// Include native regexp-macro-assembler.
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
@ -516,6 +518,7 @@ void RelocIterator::next() {
RelocIterator::RelocIterator(Code* code, int mode_mask) {
rinfo_.host_ = code;
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
// Relocation info is read backwards.
@ -736,9 +739,38 @@ ExternalReference::ExternalReference(const SCTableReference& table_ref)
: address_(table_ref.address()) {}
ExternalReference ExternalReference::
incremental_marking_record_write_function(Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
}
ExternalReference ExternalReference::
incremental_evacuation_record_write_function(Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
}
ExternalReference ExternalReference::
store_buffer_overflow_function(Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
}
ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
}
ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(Runtime::PerformGC)));
return
ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC)));
}
@ -802,17 +834,6 @@ ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
}
ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) {
return ExternalReference(isolate->factory()->the_hole_value().location());
}
ExternalReference ExternalReference::arguments_marker_location(
Isolate* isolate) {
return ExternalReference(isolate->factory()->arguments_marker().location());
}
ExternalReference ExternalReference::roots_address(Isolate* isolate) {
return ExternalReference(isolate->heap()->roots_address());
}
@ -840,9 +861,14 @@ ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
}
ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
return ExternalReference(isolate->heap()->store_buffer()->TopAddress());
}
ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
Address mask = reinterpret_cast<Address>(isolate->heap()->NewSpaceMask());
return ExternalReference(mask);
return ExternalReference(reinterpret_cast<Address>(
isolate->heap()->NewSpaceMask()));
}

27
deps/v8/src/assembler.h

@ -143,6 +143,9 @@ class Label BASE_EMBEDDED {
};
enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
// -----------------------------------------------------------------------------
// Relocation information
@ -216,8 +219,9 @@ class RelocInfo BASE_EMBEDDED {
RelocInfo() {}
RelocInfo(byte* pc, Mode rmode, intptr_t data)
: pc_(pc), rmode_(rmode), data_(data) {
RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
: pc_(pc), rmode_(rmode), data_(data), host_(host) {
}
static inline bool IsConstructCall(Mode mode) {
@ -258,6 +262,7 @@ class RelocInfo BASE_EMBEDDED {
void set_pc(byte* pc) { pc_ = pc; }
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
Code* host() const { return host_; }
// Apply a relocation by delta bytes
INLINE(void apply(intptr_t delta));
@ -353,6 +358,7 @@ class RelocInfo BASE_EMBEDDED {
byte* pc_;
Mode rmode_;
intptr_t data_;
Code* host_;
#ifdef V8_TARGET_ARCH_MIPS
// Code and Embedded Object pointers in mips are stored split
// across two consecutive 32-bit instructions. Heap management
@ -561,6 +567,13 @@ class ExternalReference BASE_EMBEDDED {
// pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually.
static ExternalReference incremental_marking_record_write_function(
Isolate* isolate);
static ExternalReference incremental_evacuation_record_write_function(
Isolate* isolate);
static ExternalReference store_buffer_overflow_function(
Isolate* isolate);
static ExternalReference flush_icache_function(Isolate* isolate);
static ExternalReference perform_gc_function(Isolate* isolate);
static ExternalReference fill_heap_number_with_random_function(
Isolate* isolate);
@ -577,12 +590,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
// Static variable Factory::the_hole_value.location()
static ExternalReference the_hole_value_location(Isolate* isolate);
// Static variable Factory::arguments_marker.location()
static ExternalReference arguments_marker_location(Isolate* isolate);
// Static variable Heap::roots_address()
static ExternalReference roots_address(Isolate* isolate);
@ -606,6 +613,10 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_space_start(Isolate* isolate);
static ExternalReference new_space_mask(Isolate* isolate);
static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
static ExternalReference new_space_mark_bits(Isolate* isolate);
// Write barrier.
static ExternalReference store_buffer_top(Isolate* isolate);
// Used for fast allocation in generated code.
static ExternalReference new_space_allocation_top_address(Isolate* isolate);

154
deps/v8/src/ast.cc

@ -327,56 +327,77 @@ bool BinaryOperation::ResultOverwriteAllowed() {
}
bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
Handle<String>* check) {
if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false;
UnaryOperation* left_unary = left_->AsUnaryOperation();
UnaryOperation* right_unary = right_->AsUnaryOperation();
Literal* left_literal = left_->AsLiteral();
Literal* right_literal = right_->AsLiteral();
// Check for the pattern: typeof <expression> == <string literal>.
if (left_unary != NULL && left_unary->op() == Token::TYPEOF &&
right_literal != NULL && right_literal->handle()->IsString()) {
*expr = left_unary->expression();
*check = Handle<String>::cast(right_literal->handle());
static bool IsTypeof(Expression* expr) {
UnaryOperation* maybe_unary = expr->AsUnaryOperation();
return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
}
// Check for the pattern: typeof <expression> equals <string literal>.
static bool MatchLiteralCompareTypeof(Expression* left,
Token::Value op,
Expression* right,
Expression** expr,
Handle<String>* check) {
if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
*expr = left->AsUnaryOperation()->expression();
*check = Handle<String>::cast(right->AsLiteral()->handle());
return true;
}
return false;
}
// Check for the pattern: <string literal> == typeof <expression>.
if (right_unary != NULL && right_unary->op() == Token::TYPEOF &&
left_literal != NULL && left_literal->handle()->IsString()) {
*expr = right_unary->expression();
*check = Handle<String>::cast(left_literal->handle());
bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
Handle<String>* check) {
return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) ||
MatchLiteralCompareTypeof(right_, op_, left_, expr, check);
}
static bool IsVoidOfLiteral(Expression* expr) {
UnaryOperation* maybe_unary = expr->AsUnaryOperation();
return maybe_unary != NULL &&
maybe_unary->op() == Token::VOID &&
maybe_unary->expression()->AsLiteral() != NULL;
}
// Check for the pattern: void <literal> equals <expression>
static bool MatchLiteralCompareUndefined(Expression* left,
Token::Value op,
Expression* right,
Expression** expr) {
if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
*expr = right;
return true;
}
return false;
}
bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
if (op_ != Token::EQ_STRICT) return false;
return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
MatchLiteralCompareUndefined(right_, op_, left_, expr);
}
UnaryOperation* left_unary = left_->AsUnaryOperation();
UnaryOperation* right_unary = right_->AsUnaryOperation();
// Check for the pattern: <expression> === void <literal>.
if (right_unary != NULL && right_unary->op() == Token::VOID &&
right_unary->expression()->AsLiteral() != NULL) {
*expr = left_;
// Check for the pattern: null equals <expression>
static bool MatchLiteralCompareNull(Expression* left,
Token::Value op,
Expression* right,
Expression** expr) {
if (left->IsNullLiteral() && Token::IsEqualityOp(op)) {
*expr = right;
return true;
}
return false;
}
// Check for the pattern: void <literal> === <expression>.
if (left_unary != NULL && left_unary->op() == Token::VOID &&
left_unary->expression()->AsLiteral() != NULL) {
*expr = right_;
return true;
}
return false;
bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
return MatchLiteralCompareNull(left_, op_, right_, expr) ||
MatchLiteralCompareNull(right_, op_, left_, expr);
}
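A rough JavaScript sketch of the source-level patterns the helpers above match (illustrative only; the variable name is invented): MatchLiteralCompareTypeof fires for a typeof expression compared against a string literal in either operand order, MatchLiteralCompareUndefined for a comparison against void <literal>, and MatchLiteralCompareNull for an equality comparison against the null literal.

  var x;

  // typeof against a string literal, either operand order:
  if (typeof x == "function") { }
  if ("function" == typeof x) { }

  // comparison against `void <literal>`, i.e. undefined:
  if (x === void 0) { }

  // equality comparison against the null literal:
  if (x == null) { }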
@ -529,7 +550,9 @@ bool Conditional::IsInlineable() const {
bool VariableProxy::IsInlineable() const {
return var()->IsUnallocated() || var()->IsStackAllocated();
return var()->IsUnallocated()
|| var()->IsStackAllocated()
|| var()->IsContextSlot();
}
@ -598,11 +621,6 @@ bool CompareOperation::IsInlineable() const {
}
bool CompareToNull::IsInlineable() const {
return expression()->IsInlineable();
}
bool CountOperation::IsInlineable() const {
return expression()->IsInlineable();
}
@ -746,37 +764,41 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
CallKind call_kind) {
is_monomorphic_ = oracle->CallIsMonomorphic(this);
Property* property = expression()->AsProperty();
ASSERT(property != NULL);
// Specialize for the receiver types seen at runtime.
Literal* key = property->key()->AsLiteral();
ASSERT(key != NULL && key->handle()->IsString());
Handle<String> name = Handle<String>::cast(key->handle());
receiver_types_.Clear();
oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
if (property == NULL) {
// Function call. Specialize for monomorphic calls.
if (is_monomorphic_) target_ = oracle->GetCallTarget(this);
} else {
// Method call. Specialize for the receiver types seen at runtime.
Literal* key = property->key()->AsLiteral();
ASSERT(key != NULL && key->handle()->IsString());
Handle<String> name = Handle<String>::cast(key->handle());
receiver_types_.Clear();
oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length();
for (int i = 0; i < length; i++) {
Handle<Map> map = receiver_types_.at(i);
ASSERT(!map.is_null() && *map != NULL);
if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length();
for (int i = 0; i < length; i++) {
Handle<Map> map = receiver_types_.at(i);
ASSERT(!map.is_null() && *map != NULL);
}
}
}
#endif
is_monomorphic_ = oracle->CallIsMonomorphic(this);
check_type_ = oracle->GetCallCheckType(this);
if (is_monomorphic_) {
Handle<Map> map;
if (receiver_types_.length() > 0) {
ASSERT(check_type_ == RECEIVER_MAP_CHECK);
map = receiver_types_.at(0);
} else {
ASSERT(check_type_ != RECEIVER_MAP_CHECK);
holder_ = Handle<JSObject>(
oracle->GetPrototypeForPrimitiveCheck(check_type_));
map = Handle<Map>(holder_->map());
check_type_ = oracle->GetCallCheckType(this);
if (is_monomorphic_) {
Handle<Map> map;
if (receiver_types_.length() > 0) {
ASSERT(check_type_ == RECEIVER_MAP_CHECK);
map = receiver_types_.at(0);
} else {
ASSERT(check_type_ != RECEIVER_MAP_CHECK);
holder_ = Handle<JSObject>(
oracle->GetPrototypeForPrimitiveCheck(check_type_));
map = Handle<Map>(holder_->map());
}
is_monomorphic_ = ComputeTarget(map, name);
}
is_monomorphic_ = ComputeTarget(map, name);
}
}
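A tiny JavaScript sketch of the two call shapes the updated RecordTypeFeedback distinguishes (the function and object names are invented):

  function f(x) { return x; }
  var o = { m: function(x) { return x; } };

  f(1);    // plain function call: only the monomorphic call target is recorded
  o.m(1);  // method call: receiver map feedback is collected for the property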

29
deps/v8/src/ast.h

@ -90,7 +90,6 @@ namespace internal {
V(CountOperation) \
V(BinaryOperation) \
V(CompareOperation) \
V(CompareToNull) \
V(ThisFunction)
#define AST_NODE_LIST(V) \
@ -289,6 +288,12 @@ class Expression: public AstNode {
// True iff the expression is a literal represented as a smi.
virtual bool IsSmiLiteral() { return false; }
// True iff the expression is a string literal.
virtual bool IsStringLiteral() { return false; }
// True iff the expression is the null literal.
virtual bool IsNullLiteral() { return false; }
// Type feedback information for assignments and properties.
virtual bool IsMonomorphic() {
UNREACHABLE();
@ -891,6 +896,8 @@ class Literal: public Expression {
virtual bool IsTrivial() { return true; }
virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
virtual bool IsStringLiteral() { return handle_->IsString(); }
virtual bool IsNullLiteral() { return handle_->IsNull(); }
// Check if this literal is identical to the other literal.
bool IsIdenticalTo(const Literal* other) const {
@ -1465,6 +1472,7 @@ class CompareOperation: public Expression {
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
bool IsLiteralCompareUndefined(Expression** expr);
bool IsLiteralCompareNull(Expression** expr);
private:
Token::Value op_;
@ -1477,25 +1485,6 @@ class CompareOperation: public Expression {
};
class CompareToNull: public Expression {
public:
CompareToNull(Isolate* isolate, bool is_strict, Expression* expression)
: Expression(isolate), is_strict_(is_strict), expression_(expression) { }
DECLARE_NODE_TYPE(CompareToNull)
virtual bool IsInlineable() const;
bool is_strict() const { return is_strict_; }
Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
Expression* expression() const { return expression_; }
private:
bool is_strict_;
Expression* expression_;
};
class Conditional: public Expression {
public:
Conditional(Isolate* isolate,

57
deps/v8/src/bootstrapper.cc

@ -34,6 +34,7 @@
#include "debug.h"
#include "execution.h"
#include "global-handles.h"
#include "isolate-inl.h"
#include "macro-assembler.h"
#include "natives.h"
#include "objects-visiting.h"
@ -995,6 +996,26 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
initial_map->instance_size() + 5 * kPointerSize);
initial_map->set_instance_descriptors(*descriptors);
initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
// RegExp prototype object is itself a RegExp.
Handle<Map> proto_map = factory->CopyMapDropTransitions(initial_map);
proto_map->set_prototype(global_context()->initial_object_prototype());
Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
heap->empty_string());
proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
heap->false_value());
proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
heap->false_value());
proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex,
heap->false_value());
proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
Smi::FromInt(0),
SKIP_WRITE_BARRIER); // It's a Smi.
initial_map->set_prototype(*proto);
factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
JSRegExp::IRREGEXP, factory->empty_string(),
JSRegExp::Flags(0), 0);
}
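With the block above, RegExp.prototype becomes a RegExp instance itself (as ES5 15.10.6 describes), so in this build one would expect roughly:

  Object.prototype.toString.call(RegExp.prototype);  // "[object RegExp]"
  RegExp.prototype.source;                           // ""
  RegExp.prototype.global;                           // false
  RegExp.prototype.lastIndex;                        // 0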
{ // -- J S O N
@ -1076,6 +1097,11 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
elements->set(0, *array);
array = factory->NewFixedArray(0);
elements->set(1, *array);
Handle<Map> non_strict_arguments_elements_map =
factory->GetElementsTransitionMap(result,
NON_STRICT_ARGUMENTS_ELEMENTS);
result->set_map(*non_strict_arguments_elements_map);
ASSERT(result->HasNonStrictArgumentsElements());
result->set_elements(*elements);
global_context()->set_aliased_arguments_boilerplate(*result);
}
@ -1327,6 +1353,8 @@ void Genesis::InstallNativeFunctions() {
configure_instance_fun);
INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
INSTALL_NATIVE(JSObject, "functionCache", function_cache);
INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
to_complete_property_descriptor);
}
void Genesis::InstallExperimentalNativeFunctions() {
@ -1555,6 +1583,18 @@ bool Genesis::InstallNatives() {
isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
array_function->shared()->DontAdaptArguments();
// InternalArrays should not use Smi-Only array optimizations. There are too
// many places in the C++ runtime code (e.g. RegEx) that assume that
// elements in InternalArrays can be set to non-Smi values without going
// through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
// transition easy to trap. Moreover, they rarely are smi-only.
MaybeObject* maybe_map =
array_function->initial_map()->CopyDropTransitions();
Map* new_map;
if (!maybe_map->To<Map>(&new_map)) return maybe_map;
new_map->set_elements_kind(FAST_ELEMENTS);
array_function->set_initial_map(new_map);
// Make "length" magic on instances.
Handle<DescriptorArray> array_descriptors =
factory()->CopyAppendForeignDescriptor(
@ -1938,14 +1978,15 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
if (!InstallExtension(extension->dependencies()[i])) return false;
}
Isolate* isolate = Isolate::Current();
Vector<const char> source = CStrVector(extension->source());
Handle<String> source_code = isolate->factory()->NewStringFromAscii(source);
bool result = CompileScriptCached(CStrVector(extension->name()),
source_code,
isolate->bootstrapper()->extensions_cache(),
extension,
Handle<Context>(isolate->context()),
false);
Handle<String> source_code =
isolate->factory()->NewExternalStringFromAscii(extension->source());
bool result = CompileScriptCached(
CStrVector(extension->name()),
source_code,
isolate->bootstrapper()->extensions_cache(),
extension,
Handle<Context>(isolate->context()),
false);
ASSERT(isolate->has_pending_exception() != result);
if (!result) {
isolate->clear_pending_exception();

146
deps/v8/src/builtins.cc

@ -33,6 +33,7 @@
#include "builtins.h"
#include "gdb-jit.h"
#include "ic-inl.h"
#include "mark-compact.h"
#include "vm-state-inl.h"
namespace v8 {
@ -202,7 +203,7 @@ BUILTIN(ArrayCodeGeneric) {
}
// 'array' now contains the JSArray we should initialize.
ASSERT(array->HasFastElements());
ASSERT(array->HasFastTypeElements());
// Optimize the case where there is one argument and the argument is a
// small smi.
@ -215,7 +216,8 @@ BUILTIN(ArrayCodeGeneric) {
{ MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
array->SetContent(FixedArray::cast(obj));
MaybeObject* maybe_obj = array->SetContent(FixedArray::cast(obj));
if (maybe_obj->IsFailure()) return maybe_obj;
return array;
}
}
@ -239,6 +241,11 @@ BUILTIN(ArrayCodeGeneric) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
// Set length and elements on the array.
MaybeObject* maybe_object =
array->EnsureCanContainElements(FixedArray::cast(obj));
if (maybe_object->IsFailure()) return maybe_object;
AssertNoAllocation no_gc;
FixedArray* elms = FixedArray::cast(obj);
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@ -247,7 +254,6 @@ BUILTIN(ArrayCodeGeneric) {
elms->set(index, args[index+1], mode);
}
// Set length and elements on the array.
array->set_elements(FixedArray::cast(obj));
array->set_length(len);
@ -295,6 +301,7 @@ static void CopyElements(Heap* heap,
if (mode == UPDATE_WRITE_BARRIER) {
heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
}
heap->incremental_marking()->RecordWrites(dst);
}
@ -313,6 +320,7 @@ static void MoveElements(Heap* heap,
if (mode == UPDATE_WRITE_BARRIER) {
heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
}
heap->incremental_marking()->RecordWrites(dst);
}
@ -358,6 +366,14 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
former_start[to_trim] = heap->fixed_array_map();
former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
// Maintain marking consistency for HeapObjectIterator and
// IncrementalMarking.
int size_delta = to_trim * kPointerSize;
if (heap->marking()->TransferMark(elms->address(),
elms->address() + size_delta)) {
MemoryChunk::IncrementLiveBytes(elms->address(), -size_delta);
}
return FixedArray::cast(HeapObject::FromAddress(
elms->address() + to_trim * kPointerSize));
}
@ -384,15 +400,36 @@ static bool ArrayPrototypeHasNoElements(Heap* heap,
MUST_USE_RESULT
static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
Heap* heap, Object* receiver) {
Heap* heap, Object* receiver, Arguments* args, int first_added_arg) {
if (!receiver->IsJSArray()) return NULL;
JSArray* array = JSArray::cast(receiver);
HeapObject* elms = array->elements();
if (elms->map() == heap->fixed_array_map()) return elms;
if (elms->map() == heap->fixed_cow_array_map()) {
return array->EnsureWritableFastElements();
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (args == NULL || !array->HasFastSmiOnlyElements()) {
return elms;
}
} else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
if (args == NULL || !array->HasFastSmiOnlyElements() ||
maybe_writable_result->IsFailure()) {
return maybe_writable_result;
}
} else {
return NULL;
}
return NULL;
// Need to ensure that the arguments passed in args can be contained in
// the array.
int args_length = args->length();
if (first_added_arg >= args_length) return array->elements();
MaybeObject* maybe_array = array->EnsureCanContainElements(
args,
first_added_arg,
args_length - first_added_arg);
if (maybe_array->IsFailure()) return maybe_array;
return array->elements();
}
@ -413,20 +450,18 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
HandleScope handleScope(isolate);
Handle<Object> js_builtin =
GetProperty(Handle<JSObject>(
isolate->global_context()->builtins()),
name);
ASSERT(js_builtin->IsJSFunction());
Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
ScopedVector<Object**> argv(args.length() - 1);
int n_args = args.length() - 1;
for (int i = 0; i < n_args; i++) {
argv[i] = args.at<Object>(i + 1).location();
}
bool pending_exception = false;
GetProperty(Handle<JSObject>(isolate->global_context()->builtins()),
name);
Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
int argc = args.length() - 1;
ScopedVector<Handle<Object> > argv(argc);
for (int i = 0; i < argc; ++i) {
argv[i] = args.at<Object>(i + 1);
}
bool pending_exception;
Handle<Object> result = Execution::Call(function,
args.receiver(),
n_args,
argc,
argv.start(),
&pending_exception);
if (pending_exception) return Failure::Exception();
@ -439,7 +474,7 @@ BUILTIN(ArrayPush) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver);
EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
if (maybe_elms_obj == NULL) {
return CallJsBuiltin(isolate, "ArrayPush", args);
}
@ -475,7 +510,6 @@ BUILTIN(ArrayPush) {
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
}
// Add the provided values.
@ -485,6 +519,10 @@ BUILTIN(ArrayPush) {
elms->set(index + len, args[index + 1], mode);
}
if (elms != array->elements()) {
array->set_elements(elms);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
return Smi::FromInt(new_length);
@ -496,7 +534,7 @@ BUILTIN(ArrayPop) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver);
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
@ -529,7 +567,7 @@ BUILTIN(ArrayShift) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver);
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayShift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@ -539,7 +577,7 @@ BUILTIN(ArrayShift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
@ -551,9 +589,7 @@ BUILTIN(ArrayShift) {
}
if (!heap->lo_space()->Contains(elms)) {
// As elms still in the same space they used to be,
// there is no need to update region dirty mark.
array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER);
array->set_elements(LeftTrimFixedArray(heap, elms, 1));
} else {
// Shift the elements.
AssertNoAllocation no_gc;
@ -573,7 +609,7 @@ BUILTIN(ArrayUnshift) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver);
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayUnshift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@ -583,7 +619,7 @@ BUILTIN(ArrayUnshift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@ -592,6 +628,10 @@ BUILTIN(ArrayUnshift) {
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
MaybeObject* maybe_object =
array->EnsureCanContainElements(&args, 1, to_add);
if (maybe_object->IsFailure()) return maybe_object;
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
@ -600,13 +640,11 @@ BUILTIN(ArrayUnshift) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
if (len > 0) {
CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
}
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
} else {
@ -634,7 +672,7 @@ BUILTIN(ArraySlice) {
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
if (!array->HasFastElements() ||
if (!array->HasFastTypeElements() ||
!IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@ -650,7 +688,7 @@ BUILTIN(ArraySlice) {
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
&& JSObject::cast(receiver)->HasFastElements();
&& JSObject::cast(receiver)->HasFastTypeElements();
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@ -721,6 +759,10 @@ BUILTIN(ArraySlice) {
}
FixedArray* result_elms = FixedArray::cast(result);
MaybeObject* maybe_object =
result_array->EnsureCanContainElements(result_elms);
if (maybe_object->IsFailure()) return maybe_object;
AssertNoAllocation no_gc;
CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len);
@ -738,7 +780,7 @@ BUILTIN(ArraySplice) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver);
EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArraySplice", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@ -748,7 +790,7 @@ BUILTIN(ArraySplice) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value();
@ -825,9 +867,9 @@ BUILTIN(ArraySplice) {
}
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
int new_length = len - actual_delete_count + item_count;
bool elms_changed = false;
if (item_count < actual_delete_count) {
// Shrink the array.
const bool trim_array = !heap->lo_space()->Contains(elms) &&
@ -842,7 +884,8 @@ BUILTIN(ArraySplice) {
}
elms = LeftTrimFixedArray(heap, elms, delta);
array->set_elements(elms, SKIP_WRITE_BARRIER);
elms_changed = true;
} else {
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc,
@ -882,7 +925,7 @@ BUILTIN(ArraySplice) {
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
elms_changed = true;
} else {
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc,
@ -898,6 +941,10 @@ BUILTIN(ArraySplice) {
elms->set(k, args[3 + k - actual_start], mode);
}
if (elms_changed) {
array->set_elements(elms);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
@ -920,7 +967,7 @@ BUILTIN(ArrayConcat) {
int result_len = 0;
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
|| JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
@ -956,6 +1003,17 @@ BUILTIN(ArrayConcat) {
}
FixedArray* result_elms = FixedArray::cast(result);
// Ensure element type transitions happen before copying elements in.
if (result_array->HasFastSmiOnlyElements()) {
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
if (!array->HasFastSmiOnlyElements()) {
result_array->EnsureCanContainNonSmiElements();
break;
}
}
}
// Copy data.
AssertNoAllocation no_gc;
int start_pos = 0;
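Sketch of the situation the transition check above handles (values are illustrative): a concat result that starts out smi-only must switch elements kind before non-smi elements are copied into it.

  var smis = [1, 2];
  var objs = [{}];
  var r = smis.concat(objs);  // the result must hold a heap object, so it cannot
                              // remain FAST_SMI_ONLY_ELEMENTS; the loop above
                              // forces the transition before the raw element copy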
@ -1607,20 +1665,22 @@ void Builtins::Setup(bool create_heap_objects) {
const BuiltinDesc* functions = BuiltinFunctionTable::functions();
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects.
byte buffer[4*KB];
// buffer, before copying it into individual code objects. Be careful
// with alignment, some platforms don't like unaligned code.
union { int force_alignment; byte buffer[4*KB]; } u;
// Traverse the list of builtins and generate an adaptor in a
// separate code object for each one.
for (int i = 0; i < builtin_count; i++) {
if (create_heap_objects) {
MacroAssembler masm(isolate, buffer, sizeof buffer);
MacroAssembler masm(isolate, u.buffer, sizeof u.buffer);
// Generate the code/adaptor.
typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
// We pass all arguments to the generator, but it may not use all of
// them. This works because the first arguments are on top of the
// stack.
ASSERT(!masm.has_frame());
g(&masm, functions[i].name, functions[i].extra_args);
// Move the code into the object heap.
CodeDesc desc;

12
deps/v8/src/cached-powers.cc

@ -134,14 +134,12 @@ static const CachedPower kCachedPowers[] = {
};
static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
static const int kCachedPowersOffset = -kCachedPowers[0].decimal_exponent;
static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent.
static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
const int PowersOfTenCache::kDecimalExponentDistance =
kCachedPowers[1].decimal_exponent - kCachedPowers[0].decimal_exponent;
const int PowersOfTenCache::kMinDecimalExponent =
kCachedPowers[0].decimal_exponent;
const int PowersOfTenCache::kMaxDecimalExponent =
kCachedPowers[kCachedPowersLength - 1].decimal_exponent;
// Difference between the decimal exponents in the table above.
const int PowersOfTenCache::kDecimalExponentDistance = 8;
const int PowersOfTenCache::kMinDecimalExponent = -348;
const int PowersOfTenCache::kMaxDecimalExponent = 340;
void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
int min_exponent,

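Aside: the rewrite above replaces constants computed from the table with hard-coded values (first decimal exponent -348, one cached power every 8 decimal exponents, last exponent 340). A quick standalone check of that arithmetic, under those assumptions only; the actual cache lookup code is not shown in this hunk:

#include <cassert>

int main() {
  const int kCachedPowersOffset = 348;     // -1 * the first decimal exponent
  const int kDecimalExponentDistance = 8;  // spacing between table entries
  // Index of the first cached power whose decimal exponent is >= the request.
  auto index_for = [&](int requested_exponent) {
    return (requested_exponent + kCachedPowersOffset +
            kDecimalExponentDistance - 1) / kDecimalExponentDistance;
  };
  assert(index_for(-348) == 0);   // kMinDecimalExponent maps to the first entry
  assert(index_for(340) == 86);   // kMaxDecimalExponent maps to the last entry
  return 0;
}
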
45 deps/v8/src/code-stubs.cc

@ -52,11 +52,12 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
masm->isolate()->counters()->code_stubs()->Increment();
// Nested stubs are not allowed for leafs.
AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
// Nested stubs are not allowed for leaves.
AllowStubCallsScope allow_scope(masm, false);
// Generate the code for the stub.
masm->set_generating_stub(true);
NoCurrentFrameScope scope(masm);
Generate(masm);
}
@ -127,8 +128,10 @@ Handle<Code> CodeStub::GetCode() {
GetKey(),
new_object);
heap->public_set_code_stubs(*dict);
code = *new_object;
Activate(code);
} else {
CHECK(IsPregenerated() == code->is_pregenerated());
}
ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
@ -166,7 +169,11 @@ MaybeObject* CodeStub::TryGetCode() {
heap->code_stubs()->AtNumberPut(GetKey(), code);
if (maybe_new_object->ToObject(&new_object)) {
heap->public_set_code_stubs(NumberDictionary::cast(new_object));
} else if (MustBeInStubCache()) {
return maybe_new_object;
}
Activate(code);
}
return code;
@ -188,6 +195,11 @@ const char* CodeStub::MajorName(CodeStub::Major major_key,
}
void CodeStub::PrintName(StringStream* stream) {
stream->Add("%s", MajorName(MajorKey(), false));
}
int ICCompareStub::MinorKey() {
return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
}
@ -245,6 +257,7 @@ void InstanceofStub::PrintName(StringStream* stream) {
void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
break;
case FAST_DOUBLE_ELEMENTS:
@ -274,7 +287,11 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
case FAST_SMI_ONLY_ELEMENTS: {
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_js_array_,
elements_kind_);
}
break;
case FAST_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
@ -302,24 +319,20 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::PrintName(StringStream* stream) {
const char* type_name = NULL; // Make g++ happy.
stream->Add("ArgumentsAccessStub_");
switch (type_) {
case READ_ELEMENT: type_name = "ReadElement"; break;
case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break;
case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break;
case NEW_STRICT: type_name = "NewStrict"; break;
case READ_ELEMENT: stream->Add("ReadElement"); break;
case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break;
case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break;
case NEW_STRICT: stream->Add("NewStrict"); break;
}
stream->Add("ArgumentsAccessStub_%s", type_name);
}
void CallFunctionStub::PrintName(StringStream* stream) {
const char* flags_name = NULL; // Make g++ happy.
switch (flags_) {
case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break;
case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break;
}
stream->Add("CallFunctionStub_Args%d%s", argc_, flags_name);
stream->Add("CallFunctionStub_Args%d", argc_);
if (ReceiverMightBeImplicit()) stream->Add("_Implicit");
if (RecordCallTarget()) stream->Add("_Recording");
}

117 deps/v8/src/code-stubs.h

@ -45,27 +45,23 @@ namespace internal {
V(Compare) \
V(CompareIC) \
V(MathPow) \
V(RecordWrite) \
V(StoreBufferOverflow) \
V(RegExpExec) \
V(TranscendentalCache) \
V(Instanceof) \
/* All stubs above this line only exist in a few versions, which are */ \
/* generated ahead of time. Therefore compiling a call to one of */ \
/* them can't cause a new stub to be compiled, so compiling a call to */ \
/* them is GC safe. The ones below this line exist in many variants */ \
/* so code compiling a call to one can cause a GC. This means they */ \
/* can't be called from other stubs, since stub generation code is */ \
/* not GC safe. */ \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
V(StackCheck) \
V(FastNewClosure) \
V(FastNewContext) \
V(FastNewBlockContext) \
V(FastCloneShallowArray) \
V(RevertToNumber) \
V(ToBoolean) \
V(ToNumber) \
V(CounterOp) \
V(ArgumentsAccess) \
V(RegExpExec) \
V(RegExpConstructResult) \
V(NumberToString) \
V(CEntry) \
@ -73,7 +69,7 @@ namespace internal {
V(KeyedLoadElement) \
V(KeyedStoreElement) \
V(DebuggerStatement) \
V(StringDictionaryNegativeLookup)
V(StringDictionaryLookup)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@ -142,6 +138,27 @@ class CodeStub BASE_EMBEDDED {
virtual ~CodeStub() {}
bool CompilingCallsToThisStubIsGCSafe() {
bool is_pregenerated = IsPregenerated();
Code* code = NULL;
CHECK(!is_pregenerated || FindCodeInCache(&code));
return is_pregenerated;
}
// See comment above, where Instanceof is defined.
virtual bool IsPregenerated() { return false; }
static void GenerateStubsAheadOfTime();
static void GenerateFPStubs();
// Some stubs put untagged junk on the stack that cannot be scanned by the
// GC. This means that we must be statically sure that no GC can occur while
// they are running. If that is the case they should override this to return
// true, which will cause an assertion if we try to call something that can
// GC or if we try to put a stack frame on top of the junk, which would not
// result in a traversable stack.
virtual bool SometimesSetsUpAFrame() { return true; }
protected:
static const int kMajorBits = 6;
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
@ -164,6 +181,14 @@ class CodeStub BASE_EMBEDDED {
// Finish the code object after it has been generated.
virtual void FinishCode(Code* code) { }
// Returns true if TryGetCode should fail if it failed
// to register newly generated stub in the stub cache.
virtual bool MustBeInStubCache() { return false; }
// Activate newly generated stub. Is called after
// registering stub in the stub cache.
virtual void Activate(Code* code) { }
// Returns information for computing the number key.
virtual Major MajorKey() = 0;
virtual int MinorKey() = 0;
@ -178,9 +203,7 @@ class CodeStub BASE_EMBEDDED {
// Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName();
virtual void PrintName(StringStream* stream) {
stream->Add("%s", MajorName(MajorKey(), false));
}
virtual void PrintName(StringStream* stream);
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
@ -193,9 +216,6 @@ class CodeStub BASE_EMBEDDED {
MajorKeyBits::encode(MajorKey());
}
// See comment above, where Instanceof is defined.
bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
@ -304,7 +324,7 @@ class FastNewContextStub : public CodeStub {
static const int kMaximumSlots = 64;
explicit FastNewContextStub(int slots) : slots_(slots) {
ASSERT(slots_ > 0 && slots <= kMaximumSlots);
ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
}
void Generate(MacroAssembler* masm);
@ -317,6 +337,24 @@ class FastNewContextStub : public CodeStub {
};
class FastNewBlockContextStub : public CodeStub {
public:
static const int kMaximumSlots = 64;
explicit FastNewBlockContextStub(int slots) : slots_(slots) {
ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
}
void Generate(MacroAssembler* masm);
private:
int slots_;
Major MajorKey() { return FastNewBlockContext; }
int MinorKey() { return slots_; }
};
class FastCloneShallowArrayStub : public CodeStub {
public:
// Maximum length of copied elements array.
@ -531,11 +569,18 @@ class CompareStub: public CodeStub {
class CEntryStub : public CodeStub {
public:
explicit CEntryStub(int result_size)
: result_size_(result_size), save_doubles_(false) { }
explicit CEntryStub(int result_size,
SaveFPRegsMode save_doubles = kDontSaveFPRegs)
: result_size_(result_size), save_doubles_(save_doubles) { }
void Generate(MacroAssembler* masm);
void SaveDoubles() { save_doubles_ = true; }
// The version of this stub that doesn't save doubles is generated ahead of
// time, so it's OK to call it from other stubs that can't cope with GC during
// their code generation. On machines that always have gp registers (x64) we
// can generate both variants ahead of time.
virtual bool IsPregenerated();
static void GenerateAheadOfTime();
private:
void GenerateCore(MacroAssembler* masm,
@ -550,7 +595,7 @@ class CEntryStub : public CodeStub {
// Number of pointers/values returned.
const int result_size_;
bool save_doubles_;
SaveFPRegsMode save_doubles_;
Major MajorKey() { return CEntry; }
int MinorKey();
@ -647,10 +692,32 @@ class CallFunctionStub: public CodeStub {
void Generate(MacroAssembler* masm);
virtual void FinishCode(Code* code);
static void Clear(Heap* heap, Address address);
static Object* GetCachedValue(Address address);
static int ExtractArgcFromMinorKey(int minor_key) {
return ArgcBits::decode(minor_key);
}
// The object that indicates an uninitialized cache.
static Handle<Object> UninitializedSentinel(Isolate* isolate) {
return isolate->factory()->the_hole_value();
}
// A raw version of the uninitialized sentinel that's safe to read during
// garbage collection (e.g., for patching the cache).
static Object* RawUninitializedSentinel(Heap* heap) {
return heap->raw_unchecked_the_hole_value();
}
// The object that indicates a megamorphic state.
static Handle<Object> MegamorphicSentinel(Isolate* isolate) {
return isolate->factory()->undefined_value();
}
private:
int argc_;
CallFunctionFlags flags_;
@ -658,8 +725,8 @@ class CallFunctionStub: public CodeStub {
virtual void PrintName(StringStream* stream);
// Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
class FlagBits: public BitField<CallFunctionFlags, 0, 1> {};
class ArgcBits: public BitField<unsigned, 1, 32 - 1> {};
class FlagBits: public BitField<CallFunctionFlags, 0, 2> {};
class ArgcBits: public BitField<unsigned, 2, 32 - 2> {};
Major MajorKey() { return CallFunction; }
int MinorKey() {
@ -670,6 +737,10 @@ class CallFunctionStub: public CodeStub {
bool ReceiverMightBeImplicit() {
return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
}
bool RecordCallTarget() {
return (flags_ & RECORD_CALL_TARGET) != 0;
}
};
@ -934,6 +1005,8 @@ class ToBooleanStub: public CodeStub {
virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; }
virtual void PrintName(StringStream* stream);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
Major MajorKey() { return ToBoolean; }
int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }

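Aside: the CallFunctionStub hunks above widen the flag field from one bit to two so RECORD_CALL_TARGET fits alongside RECEIVER_MIGHT_BE_IMPLICIT, pushing the argument count up to bits 2..31 of the minor key. A hand-rolled sketch of that layout (V8 uses its BitField template rather than raw shifts):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kFlagBits = 2;           // room for both call-function flags
  const uint32_t kArgcShift = kFlagBits;  // argc occupies the remaining 30 bits
  uint32_t flags = 0x3;                   // both flags set
  uint32_t argc = 5;
  uint32_t minor_key = flags | (argc << kArgcShift);
  assert((minor_key & ((1u << kFlagBits) - 1)) == flags);  // decode the flags
  assert((minor_key >> kArgcShift) == argc);               // decode the argc
  return 0;
}
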
2 deps/v8/src/codegen.cc

@ -218,8 +218,8 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
int CEntryStub::MinorKey() {
int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0;
ASSERT(result_size_ == 1 || result_size_ == 2);
int result = save_doubles_ ? 1 : 0;
#ifdef _WIN64
return result | ((result_size_ == 1) ? 0 : 2);
#else

77 deps/v8/src/compiler-intrinsics.h

@ -0,0 +1,77 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_COMPILER_INTRINSICS_H_
#define V8_COMPILER_INTRINSICS_H_
namespace v8 {
namespace internal {
class CompilerIntrinsics {
public:
// Returns number of zero bits preceding least significant 1 bit.
// Undefined for zero value.
INLINE(static int CountTrailingZeros(uint32_t value));
// Returns number of zero bits following most significant 1 bit.
// Undefined for zero value.
INLINE(static int CountLeadingZeros(uint32_t value));
};
#ifdef __GNUC__
int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
return __builtin_ctz(value);
}
int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
return __builtin_clz(value);
}
#elif defined(_MSC_VER)
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
unsigned long result; //NOLINT
_BitScanForward(&result, static_cast<long>(value)); //NOLINT
return static_cast<int>(result);
}
int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
unsigned long result; //NOLINT
_BitScanReverse(&result, static_cast<long>(value)); //NOLINT
return 31 - static_cast<int>(result);
}
#else
#error Unsupported compiler
#endif
} } // namespace v8::internal
#endif // V8_COMPILER_INTRINSICS_H_

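Aside: the new compiler-intrinsics.h above maps CountTrailingZeros and CountLeadingZeros onto compiler builtins, both undefined for a zero input. A tiny usage check for the GCC/Clang path, assuming a 32-bit unsigned int:

#include <cassert>
#include <cstdint>

int main() {
#if defined(__GNUC__)
  uint32_t value = 0x80u;                // only bit 7 is set
  assert(__builtin_ctz(value) == 7);     // CountTrailingZeros: zeros below bit 7
  assert(__builtin_clz(value) == 24);    // CountLeadingZeros: 32 - 8 zeros above it
#endif
  return 0;
}
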
3 deps/v8/src/compiler.cc

@ -36,6 +36,7 @@
#include "full-codegen.h"
#include "gdb-jit.h"
#include "hydrogen.h"
#include "isolate-inl.h"
#include "lithium.h"
#include "liveedit.h"
#include "parser.h"
@ -275,7 +276,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
}
Handle<Context> global_context(info->closure()->context()->global_context());
TypeFeedbackOracle oracle(code, global_context);
TypeFeedbackOracle oracle(code, global_context, info->isolate());
HGraphBuilder builder(info, &oracle);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph();

100 deps/v8/src/contexts.cc

@ -86,14 +86,14 @@ void Context::set_global_proxy(JSObject* object) {
Handle<Object> Context::Lookup(Handle<String> name,
ContextLookupFlags flags,
int* index_,
int* index,
PropertyAttributes* attributes,
BindingFlags* binding_flags) {
Isolate* isolate = GetIsolate();
Handle<Context> context(this, isolate);
bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
*index_ = -1;
*index = -1;
*attributes = ABSENT;
*binding_flags = MISSING_BINDING;
@ -110,70 +110,50 @@ Handle<Object> Context::Lookup(Handle<String> name,
PrintF("\n");
}
// Check extension/with/global object.
if (!context->IsBlockContext() && context->has_extension()) {
if (context->IsCatchContext()) {
// Catch contexts have the variable name in the extension slot.
if (name->Equals(String::cast(context->extension()))) {
if (FLAG_trace_contexts) {
PrintF("=> found in catch context\n");
}
*index_ = Context::THROWN_OBJECT_INDEX;
*attributes = NONE;
*binding_flags = MUTABLE_IS_INITIALIZED;
return context;
}
// 1. Check global objects, subjects of with, and extension objects.
if (context->IsGlobalContext() ||
context->IsWithContext() ||
(context->IsFunctionContext() && context->has_extension())) {
Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
// Context extension objects needs to behave as if they have no
// prototype. So even if we want to follow prototype chains, we need
// to only do a local lookup for context extension objects.
if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
object->IsJSContextExtensionObject()) {
*attributes = object->GetLocalPropertyAttribute(*name);
} else {
ASSERT(context->IsGlobalContext() ||
context->IsFunctionContext() ||
context->IsWithContext());
// Global, function, and with contexts may have an object in the
// extension slot.
Handle<JSObject> extension(JSObject::cast(context->extension()),
isolate);
// Context extension objects needs to behave as if they have no
// prototype. So even if we want to follow prototype chains, we
// need to only do a local lookup for context extension objects.
if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
extension->IsJSContextExtensionObject()) {
*attributes = extension->GetLocalPropertyAttribute(*name);
} else {
*attributes = extension->GetPropertyAttribute(*name);
}
if (*attributes != ABSENT) {
// property found
if (FLAG_trace_contexts) {
PrintF("=> found property in context object %p\n",
reinterpret_cast<void*>(*extension));
}
return extension;
*attributes = object->GetPropertyAttribute(*name);
}
if (*attributes != ABSENT) {
if (FLAG_trace_contexts) {
PrintF("=> found property in context object %p\n",
reinterpret_cast<void*>(*object));
}
return object;
}
}
// Check serialized scope information of functions and blocks. Only
// functions can have parameters, and a function name.
// 2. Check the context proper if it has slots.
if (context->IsFunctionContext() || context->IsBlockContext()) {
// We may have context-local slots. Check locals in the context.
// Use serialized scope information of functions and blocks to search
// for the context index.
Handle<SerializedScopeInfo> scope_info;
if (context->IsFunctionContext()) {
scope_info = Handle<SerializedScopeInfo>(
context->closure()->shared()->scope_info(), isolate);
} else {
ASSERT(context->IsBlockContext());
scope_info = Handle<SerializedScopeInfo>(
SerializedScopeInfo::cast(context->extension()), isolate);
}
Variable::Mode mode;
int index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
if (index >= 0) {
int slot_index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
if (slot_index >= 0) {
if (FLAG_trace_contexts) {
PrintF("=> found local in context slot %d (mode = %d)\n",
index, mode);
slot_index, mode);
}
*index_ = index;
*index = slot_index;
// Note: Fixed context slots are statically allocated by the compiler.
// Statically allocated variables always have a statically known mode,
// which is the mode with which they were declared when added to the
@ -206,22 +186,34 @@ Handle<Object> Context::Lookup(Handle<String> name,
// Check the slot corresponding to the intermediate context holding
// only the function name variable.
if (follow_context_chain) {
int index = scope_info->FunctionContextSlotIndex(*name);
if (index >= 0) {
if (follow_context_chain && context->IsFunctionContext()) {
int function_index = scope_info->FunctionContextSlotIndex(*name);
if (function_index >= 0) {
if (FLAG_trace_contexts) {
PrintF("=> found intermediate function in context slot %d\n",
index);
function_index);
}
*index_ = index;
*index = function_index;
*attributes = READ_ONLY;
*binding_flags = IMMUTABLE_IS_INITIALIZED;
return context;
}
}
} else if (context->IsCatchContext()) {
// Catch contexts have the variable name in the extension slot.
if (name->Equals(String::cast(context->extension()))) {
if (FLAG_trace_contexts) {
PrintF("=> found in catch context\n");
}
*index = Context::THROWN_OBJECT_INDEX;
*attributes = NONE;
*binding_flags = MUTABLE_IS_INITIALIZED;
return context;
}
}
// Proceed with the previous context.
// 3. Prepare to continue with the previous (next outermost) context.
if (context->IsGlobalContext()) {
follow_context_chain = false;
} else {

41 deps/v8/src/contexts.h

@ -134,6 +134,8 @@ enum BindingFlags {
V(MAP_CACHE_INDEX, Object, map_cache) \
V(CONTEXT_DATA_INDEX, Object, data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
@ -252,6 +254,7 @@ class Context: public FixedArray {
OUT_OF_MEMORY_INDEX,
CONTEXT_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
DERIVED_SET_TRAP_INDEX,
@ -330,12 +333,6 @@ class Context: public FixedArray {
// Mark the global context with out of memory.
inline void mark_out_of_memory();
// The exception holder is the object used as a with object in
// the implementation of a catch block.
bool is_exception_holder(Object* object) {
return IsCatchContext() && extension() == object;
}
// A global context hold a list of all functions which have been optimized.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
@ -355,29 +352,25 @@ class Context: public FixedArray {
#undef GLOBAL_CONTEXT_FIELD_ACCESSORS
// Lookup the slot called name, starting with the current context.
// There are 4 possible outcomes:
//
// 1) index_ >= 0 && result->IsContext():
// most common case, the result is a Context, and index is the
// context slot index, and the slot exists.
// attributes == READ_ONLY for the function name variable, NONE otherwise.
// There are three possibilities:
//
// 2) index_ >= 0 && result->IsJSObject():
// the result is the JSObject arguments object, the index is the parameter
// index, i.e., key into the arguments object, and the property exists.
// attributes != ABSENT.
// 1) result->IsContext():
// The binding was found in a context. *index is always the
// non-negative slot index. *attributes is NONE for var and let
// declarations, READ_ONLY for const declarations (never ABSENT).
//
// 3) index_ < 0 && result->IsJSObject():
// the result is the JSObject extension context or the global object,
// and the name is the property name, and the property exists.
// attributes != ABSENT.
// 2) result->IsJSObject():
// The binding was found as a named property in a context extension
// object (i.e., was introduced via eval), as a property on the subject
// of with, or as a property of the global object. *index is -1 and
// *attributes is not ABSENT.
//
// 4) index_ < 0 && result.is_null():
// there was no context found with the corresponding property.
// attributes == ABSENT.
// 3) result.is_null():
// There was no binding found, *index is always -1 and *attributes is
// always ABSENT.
Handle<Object> Lookup(Handle<String> name,
ContextLookupFlags flags,
int* index_,
int* index,
PropertyAttributes* attributes,
BindingFlags* binding_flags);

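Aside: the rewritten comment above pins down three outcomes for Context::Lookup. A standalone sketch of how a caller might branch on them; the struct and names below are stand-ins for illustration, not the V8 types:

#include <cstdio>

enum Attributes { NONE, READ_ONLY, ABSENT };
struct Result {
  bool found_in_context;  // outcome 1: result->IsContext()
  bool found_on_object;   // outcome 2: result->IsJSObject()
  int index;              // slot index for outcome 1, otherwise -1
  Attributes attributes;
};

static void Describe(const Result& r) {
  if (r.found_in_context) {
    std::printf("context slot %d (%s)\n", r.index,
                r.attributes == READ_ONLY ? "const" : "var/let");
  } else if (r.found_on_object) {
    std::printf("named property on an extension or global object\n");
  } else {
    std::printf("no binding found (attributes == ABSENT)\n");
  }
}

int main() {
  Describe(Result{true, false, 4, NONE});
  Describe(Result{false, true, -1, NONE});
  Describe(Result{false, false, -1, ABSENT});
  return 0;
}
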
2 deps/v8/src/conversions-inl.h

@ -47,7 +47,7 @@ namespace v8 {
namespace internal {
static inline double JunkStringValue() {
return std::numeric_limits<double>::quiet_NaN();
return BitCast<double, uint64_t>(kQuietNaNMask);
}

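Aside: JunkStringValue now bit-casts a quiet-NaN mask instead of asking <limits>. A standalone equivalent, assuming kQuietNaNMask is the usual quiet-NaN bit pattern (the exact constant is not shown in this hunk):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  uint64_t quiet_nan_bits = 0x7FF8000000000000ULL;  // assumed mask value
  double value;
  std::memcpy(&value, &quiet_nan_bits, sizeof(value));  // the bit cast
  assert(std::isnan(value));
  return 0;
}
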
2 deps/v8/src/conversions.h

@ -28,8 +28,6 @@
#ifndef V8_CONVERSIONS_H_
#define V8_CONVERSIONS_H_
#include <limits>
#include "utils.h"
namespace v8 {

2 deps/v8/src/cpu-profiler.cc

@ -551,12 +551,12 @@ void CpuProfiler::StopProcessor() {
sampler->Stop();
need_to_stop_sampler_ = false;
}
NoBarrier_Store(&is_profiling_, false);
processor_->Stop();
processor_->Join();
delete processor_;
delete generator_;
processor_ = NULL;
NoBarrier_Store(&is_profiling_, false);
generator_ = NULL;
logger->logging_nesting_ = saved_logging_nesting_;
}

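Aside: the StopProcessor hunk above moves the clearing of is_profiling_ ahead of stopping and joining the processor. A rough illustration of that shutdown ordering with standard threads; V8 uses its own atomics and thread classes, so this is only the shape of the idea:

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

int main() {
  std::atomic<bool> is_profiling(true);
  std::thread worker([&] {
    while (is_profiling.load()) {
      std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
    std::printf("worker saw is_profiling == false, exiting\n");
  });
  is_profiling.store(false);  // clear the flag first...
  worker.join();              // ...then wait for the worker to finish
  return 0;
}
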
5 deps/v8/src/d8-debug.cc

@ -1,4 +1,4 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -25,6 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef ENABLE_DEBUGGER_SUPPORT
#include "d8.h"
#include "d8-debug.h"
@ -367,3 +368,5 @@ void KeyboardThread::Run() {
} // namespace v8
#endif // ENABLE_DEBUGGER_SUPPORT

34 deps/v8/src/d8.cc

@ -146,11 +146,11 @@ bool Shell::ExecuteString(Handle<String> source,
Handle<Value> name,
bool print_result,
bool report_exceptions) {
#ifndef V8_SHARED
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
bool FLAG_debugger = i::FLAG_debugger;
#else
bool FLAG_debugger = false;
#endif // V8_SHARED
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
HandleScope handle_scope;
TryCatch try_catch;
options.script_executed = true;
@ -594,6 +594,7 @@ void Shell::InstallUtilityScript() {
Context::Scope utility_scope(utility_context_);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
// Install the debugger object in the utility scope
i::Debug* debug = i::Isolate::Current()->debug();
debug->Load();
@ -816,7 +817,7 @@ void Shell::OnExit() {
static FILE* FOpen(const char* path, const char* mode) {
#if (defined(_WIN32) || defined(_WIN64))
#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
FILE* result;
if (fopen_s(&result, path, mode) == 0) {
return result;
@ -900,9 +901,6 @@ void Shell::RunShell() {
#ifndef V8_SHARED
console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
if (i::FLAG_debugger) {
printf("JavaScript debugger enabled\n");
}
console->Open();
while (true) {
i::SmartArrayPointer<char> input = console->Prompt(Shell::kPrompt);
@ -1253,14 +1251,22 @@ int Shell::RunMain(int argc, char* argv[]) {
Locker lock;
HandleScope scope;
Persistent<Context> context = CreateEvaluationContext();
if (options.last_run) {
// Keep using the same context in the interactive shell.
evaluation_context_ = context;
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
// If the interactive debugger is enabled make sure to activate
// it before running the files passed on the command line.
if (i::FLAG_debugger) {
InstallUtilityScript();
}
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
}
{
Context::Scope cscope(context);
options.isolate_sources[0].Execute();
}
if (options.last_run) {
// Keep using the same context in the interactive shell
evaluation_context_ = context;
} else {
if (!options.last_run) {
context.Dispose();
}
@ -1331,9 +1337,11 @@ int Shell::Main(int argc, char* argv[]) {
if (( options.interactive_shell
|| !options.script_executed )
&& !options.test_shell ) {
#ifndef V8_SHARED
InstallUtilityScript();
#endif // V8_SHARED
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
if (!i::FLAG_debugger) {
InstallUtilityScript();
}
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
RunShell();
}

226 deps/v8/src/debug.cc

@ -40,6 +40,7 @@
#include "global-handles.h"
#include "ic.h"
#include "ic-inl.h"
#include "isolate-inl.h"
#include "list.h"
#include "messages.h"
#include "natives.h"
@ -401,15 +402,15 @@ void BreakLocationIterator::PrepareStepIn() {
// Step in can only be prepared if currently positioned on an IC call,
// construct call or CallFunction stub call.
Address target = rinfo()->target_address();
Handle<Code> code(Code::GetCodeFromTargetAddress(target));
if (code->is_call_stub() || code->is_keyed_call_stub()) {
Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) {
// Step in through IC call is handled by the runtime system. Therefore make
// sure that the any current IC is cleared and the runtime system is
// called. If the executing code has a debug break at the location change
// the call in the original code as it is the code there that will be
// executed in place of the debug break call.
Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count(),
code->kind());
Handle<Code> stub = ComputeCallDebugPrepareStepIn(
target_code->arguments_count(), target_code->kind());
if (IsDebugBreak()) {
original_rinfo()->set_target_address(stub->entry());
} else {
@ -419,7 +420,7 @@ void BreakLocationIterator::PrepareStepIn() {
#ifdef DEBUG
// All the following stuff is needed only for assertion checks so the code
// is wrapped in ifdef.
Handle<Code> maybe_call_function_stub = code;
Handle<Code> maybe_call_function_stub = target_code;
if (IsDebugBreak()) {
Address original_target = original_rinfo()->target_address();
maybe_call_function_stub =
@ -436,8 +437,9 @@ void BreakLocationIterator::PrepareStepIn() {
// Step in through CallFunction stub should also be prepared by caller of
// this function (Debug::PrepareStep) which should flood target function
// with breakpoints.
ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub()
|| is_call_function_stub);
ASSERT(RelocInfo::IsConstructCall(rmode()) ||
target_code->is_inline_cache_stub() ||
is_call_function_stub);
#endif
}
}
@ -474,11 +476,11 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
RelocInfo::Mode mode = rmode();
if (RelocInfo::IsCodeTarget(mode)) {
Address target = rinfo()->target_address();
Handle<Code> code(Code::GetCodeFromTargetAddress(target));
Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
// Patch the code to invoke the builtin debug break function matching the
// calling convention used by the call site.
Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
Handle<Code> dbgbrk_code(Debug::FindDebugBreak(target_code, mode));
rinfo()->set_target_address(dbgbrk_code->entry());
}
}
@ -772,7 +774,7 @@ bool Debug::CompileDebuggerScript(int index) {
// Execute the shared function in the debugger context.
Handle<Context> context = isolate->global_context();
bool caught_exception = false;
bool caught_exception;
Handle<JSFunction> function =
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
@ -1103,14 +1105,13 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
// Call HandleBreakPointx.
bool caught_exception = false;
const int argc = 2;
Object** argv[argc] = {
break_id.location(),
reinterpret_cast<Object**>(break_point_object.location())
};
bool caught_exception;
Handle<Object> argv[] = { break_id, break_point_object };
Handle<Object> result = Execution::TryCall(check_break_point,
isolate_->js_builtins_object(), argc, argv, &caught_exception);
isolate_->js_builtins_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
// If exception or non boolean result handle as not triggered
if (caught_exception || !result->IsBoolean()) {
@ -1732,6 +1733,10 @@ void Debug::PrepareForBreakPoints() {
if (!has_break_points_) {
Deoptimizer::DeoptimizeAll();
// We are going to iterate heap to find all functions without
// debug break slots.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
AssertNoAllocation no_allocation;
Builtins* builtins = isolate_->builtins();
Code* lazy_compile = builtins->builtin(Builtins::kLazyCompile);
@ -1997,9 +2002,10 @@ void Debug::CreateScriptCache() {
// Perform two GCs to get rid of all unreferenced scripts. The first GC gets
// rid of all the cached script wrappers and the second gets rid of the
// scripts which are no longer referenced.
heap->CollectAllGarbage(false);
heap->CollectAllGarbage(false);
// scripts which are no longer referenced. The second also sweeps precisely,
// which saves us doing yet another GC to make the heap iterable.
heap->CollectAllGarbage(Heap::kNoGCFlags);
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);
ASSERT(script_cache_ == NULL);
script_cache_ = new ScriptCache();
@ -2007,6 +2013,8 @@ void Debug::CreateScriptCache() {
// Scan heap for Script objects.
int count = 0;
HeapIterator iterator;
AssertNoAllocation no_allocation;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
script_cache_->Add(Handle<Script>(Script::cast(obj)));
@ -2047,7 +2055,7 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
// Perform GC to get unreferenced scripts evicted from the cache before
// returning the content.
isolate_->heap()->CollectAllGarbage(false);
isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags);
// Get the scripts from the cache.
return script_cache_->GetScripts();
@ -2093,7 +2101,8 @@ Debugger::~Debugger() {
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
int argc, Object*** argv,
int argc,
Handle<Object> argv[],
bool* caught_exception) {
ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
@ -2110,7 +2119,9 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
Handle<Object> js_object = Execution::TryCall(
Handle<JSFunction>::cast(constructor),
Handle<JSObject>(isolate_->debug()->debug_context()->global()),
argc, argv, caught_exception);
argc,
argv,
caught_exception);
return js_object;
}
@ -2119,10 +2130,11 @@ Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
// Create the execution state object.
Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
isolate_->debug()->break_id());
const int argc = 1;
Object** argv[argc] = { break_id.location() };
Handle<Object> argv[] = { break_id };
return MakeJSObject(CStrVector("MakeExecutionState"),
argc, argv, caught_exception);
ARRAY_SIZE(argv),
argv,
caught_exception);
}
@ -2130,11 +2142,9 @@ Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state,
Handle<Object> break_points_hit,
bool* caught_exception) {
// Create the new break event object.
const int argc = 2;
Object** argv[argc] = { exec_state.location(),
break_points_hit.location() };
Handle<Object> argv[] = { exec_state, break_points_hit };
return MakeJSObject(CStrVector("MakeBreakEvent"),
argc,
ARRAY_SIZE(argv),
argv,
caught_exception);
}
@ -2146,23 +2156,24 @@ Handle<Object> Debugger::MakeExceptionEvent(Handle<Object> exec_state,
bool* caught_exception) {
Factory* factory = isolate_->factory();
// Create the new exception event object.
const int argc = 3;
Object** argv[argc] = { exec_state.location(),
exception.location(),
uncaught ? factory->true_value().location() :
factory->false_value().location()};
Handle<Object> argv[] = { exec_state,
exception,
factory->ToBoolean(uncaught) };
return MakeJSObject(CStrVector("MakeExceptionEvent"),
argc, argv, caught_exception);
ARRAY_SIZE(argv),
argv,
caught_exception);
}
Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
bool* caught_exception) {
// Create the new function event object.
const int argc = 1;
Object** argv[argc] = { function.location() };
Handle<Object> argv[] = { function };
return MakeJSObject(CStrVector("MakeNewFunctionEvent"),
argc, argv, caught_exception);
ARRAY_SIZE(argv),
argv,
caught_exception);
}
@ -2173,14 +2184,11 @@ Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
// Create the compile event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> script_wrapper = GetScriptWrapper(script);
const int argc = 3;
Object** argv[argc] = { exec_state.location(),
script_wrapper.location(),
before ? factory->true_value().location() :
factory->false_value().location() };
Handle<Object> argv[] = { exec_state,
script_wrapper,
factory->ToBoolean(before) };
return MakeJSObject(CStrVector("MakeCompileEvent"),
argc,
ARRAY_SIZE(argv),
argv,
caught_exception);
}
@ -2191,11 +2199,10 @@ Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
// Create the script collected event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
const int argc = 2;
Object** argv[argc] = { exec_state.location(), id_object.location() };
Handle<Object> argv[] = { exec_state, id_object };
return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
argc,
ARRAY_SIZE(argv),
argv,
caught_exception);
}
@ -2345,12 +2352,13 @@ void Debugger::OnAfterCompile(Handle<Script> script,
Handle<JSValue> wrapper = GetScriptWrapper(script);
// Call UpdateScriptBreakPoints expect no exceptions.
bool caught_exception = false;
const int argc = 1;
Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
bool caught_exception;
Handle<Object> argv[] = { wrapper };
Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
Isolate::Current()->js_builtins_object(), argc, argv,
&caught_exception);
Isolate::Current()->js_builtins_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
if (caught_exception) {
return;
}
@ -2481,13 +2489,16 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
// Invoke the JavaScript debug event listener.
const int argc = 4;
Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
exec_state.location(),
Handle<Object>::cast(event_data).location(),
event_listener_data_.location() };
bool caught_exception = false;
Execution::TryCall(fun, isolate_->global(), argc, argv, &caught_exception);
Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event)),
exec_state,
event_data,
event_listener_data_ };
bool caught_exception;
Execution::TryCall(fun,
isolate_->global(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
// Silently ignore exceptions from debug event listeners.
}
@ -2856,12 +2867,11 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
return isolate_->factory()->undefined_value();
}
static const int kArgc = 2;
Object** argv[kArgc] = { exec_state.location(), data.location() };
Handle<Object> argv[] = { exec_state, data };
Handle<Object> result = Execution::Call(
fun,
Handle<Object>(isolate_->debug()->debug_context_->global_proxy()),
kArgc,
ARRAY_SIZE(argv),
argv,
pending_exception);
return result;
@ -2929,6 +2939,94 @@ void Debugger::CallMessageDispatchHandler() {
}
EnterDebugger::EnterDebugger()
: isolate_(Isolate::Current()),
prev_(isolate_->debug()->debugger_entry()),
it_(isolate_),
has_js_frames_(!it_.done()),
save_(isolate_) {
Debug* debug = isolate_->debug();
ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
// Link recursive debugger entry.
debug->set_debugger_entry(this);
// Store the previous break id and frame id.
break_id_ = debug->break_id();
break_frame_id_ = debug->break_frame_id();
// Create the new break info. If there is no JavaScript frames there is no
// break frame id.
if (has_js_frames_) {
debug->NewBreak(it_.frame()->id());
} else {
debug->NewBreak(StackFrame::NO_ID);
}
// Make sure that debugger is loaded and enter the debugger context.
load_failed_ = !debug->Load();
if (!load_failed_) {
// NOTE the member variable save which saves the previous context before
// this change.
isolate_->set_context(*debug->debug_context());
}
}
EnterDebugger::~EnterDebugger() {
ASSERT(Isolate::Current() == isolate_);
Debug* debug = isolate_->debug();
// Restore to the previous break state.
debug->SetBreak(break_frame_id_, break_id_);
// Check for leaving the debugger.
if (prev_ == NULL) {
// Clear mirror cache when leaving the debugger. Skip this if there is a
// pending exception as clearing the mirror cache calls back into
// JavaScript. This can happen if the v8::Debug::Call is used in which
// case the exception should end up in the calling code.
if (!isolate_->has_pending_exception()) {
// Try to avoid any pending debug break breaking in the clear mirror
// cache JavaScript code.
if (isolate_->stack_guard()->IsDebugBreak()) {
debug->set_interrupts_pending(DEBUGBREAK);
isolate_->stack_guard()->Continue(DEBUGBREAK);
}
debug->ClearMirrorCache();
}
// Request preemption and debug break when leaving the last debugger entry
// if any of these where recorded while debugging.
if (debug->is_interrupt_pending(PREEMPT)) {
// This re-scheduling of preemption is to avoid starvation in some
// debugging scenarios.
debug->clear_interrupt_pending(PREEMPT);
isolate_->stack_guard()->Preempt();
}
if (debug->is_interrupt_pending(DEBUGBREAK)) {
debug->clear_interrupt_pending(DEBUGBREAK);
isolate_->stack_guard()->DebugBreak();
}
// If there are commands in the queue when leaving the debugger request
// that these commands are processed.
if (isolate_->debugger()->HasCommands()) {
isolate_->stack_guard()->DebugCommand();
}
// If leaving the debugger with the debugger no longer active unload it.
if (!isolate_->debugger()->IsDebuggerActive()) {
isolate_->debugger()->UnloadDebugger();
}
}
// Leaving this debugger entry.
debug->set_debugger_entry(prev_);
}
MessageImpl MessageImpl::NewEvent(DebugEvent event,
bool running,
Handle<JSObject> exec_state,

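Aside: the recurring change through debug.cc above replaces hand-counted Object** argv arrays with Handle<Object> argv[] plus ARRAY_SIZE, so the length can no longer drift from the array contents. A plain-C++ sketch of the same convention, with strings standing in for handles and ARRAY_SIZE defined the common way:

#include <cstdio>

#define ARRAY_SIZE(a) (static_cast<int>(sizeof(a) / sizeof((a)[0])))

static void TryCall(const char* name, int argc, const char* argv[]) {
  std::printf("%s: %d argument(s)\n", name, argc);
  for (int i = 0; i < argc; i++) std::printf("  argv[%d] = %s\n", i, argv[i]);
}

int main() {
  const char* argv[] = { "exec_state", "break_points_hit" };
  // The length is derived from the array itself, not maintained by hand.
  TryCall("MakeBreakEvent", ARRAY_SIZE(argv), argv);
  return 0;
}
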
90 deps/v8/src/debug.h

@ -705,7 +705,8 @@ class Debugger {
void DebugRequest(const uint16_t* json_request, int length);
Handle<Object> MakeJSObject(Vector<const char> constructor_name,
int argc, Object*** argv,
int argc,
Handle<Object> argv[],
bool* caught_exception);
Handle<Object> MakeExecutionState(bool* caught_exception);
Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
@ -869,91 +870,8 @@ class Debugger {
// some reason could not be entered FailedToEnter will return true.
class EnterDebugger BASE_EMBEDDED {
public:
EnterDebugger()
: isolate_(Isolate::Current()),
prev_(isolate_->debug()->debugger_entry()),
it_(isolate_),
has_js_frames_(!it_.done()),
save_(isolate_) {
Debug* debug = isolate_->debug();
ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
// Link recursive debugger entry.
debug->set_debugger_entry(this);
// Store the previous break id and frame id.
break_id_ = debug->break_id();
break_frame_id_ = debug->break_frame_id();
// Create the new break info. If there is no JavaScript frames there is no
// break frame id.
if (has_js_frames_) {
debug->NewBreak(it_.frame()->id());
} else {
debug->NewBreak(StackFrame::NO_ID);
}
// Make sure that debugger is loaded and enter the debugger context.
load_failed_ = !debug->Load();
if (!load_failed_) {
// NOTE the member variable save which saves the previous context before
// this change.
isolate_->set_context(*debug->debug_context());
}
}
~EnterDebugger() {
ASSERT(Isolate::Current() == isolate_);
Debug* debug = isolate_->debug();
// Restore to the previous break state.
debug->SetBreak(break_frame_id_, break_id_);
// Check for leaving the debugger.
if (prev_ == NULL) {
// Clear mirror cache when leaving the debugger. Skip this if there is a
// pending exception as clearing the mirror cache calls back into
// JavaScript. This can happen if the v8::Debug::Call is used in which
// case the exception should end up in the calling code.
if (!isolate_->has_pending_exception()) {
// Try to avoid any pending debug break breaking in the clear mirror
// cache JavaScript code.
if (isolate_->stack_guard()->IsDebugBreak()) {
debug->set_interrupts_pending(DEBUGBREAK);
isolate_->stack_guard()->Continue(DEBUGBREAK);
}
debug->ClearMirrorCache();
}
// Request preemption and debug break when leaving the last debugger entry
// if any of these where recorded while debugging.
if (debug->is_interrupt_pending(PREEMPT)) {
// This re-scheduling of preemption is to avoid starvation in some
// debugging scenarios.
debug->clear_interrupt_pending(PREEMPT);
isolate_->stack_guard()->Preempt();
}
if (debug->is_interrupt_pending(DEBUGBREAK)) {
debug->clear_interrupt_pending(DEBUGBREAK);
isolate_->stack_guard()->DebugBreak();
}
// If there are commands in the queue when leaving the debugger request
// that these commands are processed.
if (isolate_->debugger()->HasCommands()) {
isolate_->stack_guard()->DebugCommand();
}
// If leaving the debugger with the debugger no longer active unload it.
if (!isolate_->debugger()->IsDebuggerActive()) {
isolate_->debugger()->UnloadDebugger();
}
}
// Leaving this debugger entry.
debug->set_debugger_entry(prev_);
}
EnterDebugger();
~EnterDebugger();
// Check whether the debugger could be entered.
inline bool FailedToEnter() { return load_failed_; }

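Aside: EnterDebugger (now defined out-of-line in debug.cc) is a scope object: the constructor links a new debugger entry over the previous one and the destructor restores it. A generic sketch of that nesting pattern, unrelated to the V8 types:

#include <cstdio>

class ScopedEntry {
 public:
  explicit ScopedEntry(ScopedEntry** top) : top_(top), prev_(*top) { *top_ = this; }
  ~ScopedEntry() { *top_ = prev_; }             // restore the previous entry
  ScopedEntry* previous() const { return prev_; }
 private:
  ScopedEntry** top_;
  ScopedEntry* prev_;
};

int main() {
  ScopedEntry* top = nullptr;
  {
    ScopedEntry outer(&top);
    {
      ScopedEntry inner(&top);
      std::printf("nested: top has a previous entry? %s\n",
                  top->previous() != nullptr ? "yes" : "no");
    }
    std::printf("after inner: top == &outer? %s\n", top == &outer ? "yes" : "no");
  }
  std::printf("after outer: top == nullptr? %s\n", top == nullptr ? "yes" : "no");
  return 0;
}
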
70 deps/v8/src/deoptimizer.cc

@ -52,11 +52,13 @@ DeoptimizerData::DeoptimizerData() {
DeoptimizerData::~DeoptimizerData() {
if (eager_deoptimization_entry_code_ != NULL) {
eager_deoptimization_entry_code_->Free(EXECUTABLE);
Isolate::Current()->memory_allocator()->Free(
eager_deoptimization_entry_code_);
eager_deoptimization_entry_code_ = NULL;
}
if (lazy_deoptimization_entry_code_ != NULL) {
lazy_deoptimization_entry_code_->Free(EXECUTABLE);
Isolate::Current()->memory_allocator()->Free(
lazy_deoptimization_entry_code_);
lazy_deoptimization_entry_code_ = NULL;
}
}
@ -71,6 +73,8 @@ void DeoptimizerData::Iterate(ObjectVisitor* v) {
#endif
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(JSFunction* function,
BailoutType type,
unsigned bailout_id,
@ -319,6 +323,8 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
input_(NULL),
output_count_(0),
output_(NULL),
frame_alignment_marker_(isolate->heap()->frame_alignment_marker()),
has_alignment_padding_(0),
deferred_heap_numbers_(0) {
if (FLAG_trace_deopt && type != OSR) {
if (type == DEBUGGER) {
@ -343,6 +349,26 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
if (type == EAGER) {
ASSERT(from == NULL);
optimized_code_ = function_->code();
if (FLAG_trace_deopt && FLAG_code_comments) {
// Print instruction associated with this bailout.
const char* last_comment = NULL;
int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
| RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
if (info->rmode() == RelocInfo::COMMENT) {
last_comment = reinterpret_cast<const char*>(info->data());
}
if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
unsigned id = Deoptimizer::GetDeoptimizationId(
info->target_address(), Deoptimizer::EAGER);
if (id == bailout_id && last_comment != NULL) {
PrintF(" %s\n", last_comment);
break;
}
}
}
}
} else if (type == LAZY) {
optimized_code_ = FindDeoptimizingCodeFromAddress(from);
ASSERT(optimized_code_ != NULL);
@ -386,7 +412,7 @@ void Deoptimizer::DeleteFrameDescriptions() {
Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
ASSERT(id >= 0);
if (id >= kNumberOfEntries) return NULL;
LargeObjectChunk* base = NULL;
MemoryChunk* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
if (data->eager_deoptimization_entry_code_ == NULL) {
@ -400,12 +426,12 @@ Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
base = data->lazy_deoptimization_entry_code_;
}
return
static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
static_cast<Address>(base->body()) + (id * table_entry_size_);
}
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
LargeObjectChunk* base = NULL;
MemoryChunk* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
base = data->eager_deoptimization_entry_code_;
@ -413,14 +439,14 @@ int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
base = data->lazy_deoptimization_entry_code_;
}
if (base == NULL ||
addr < base->GetStartAddress() ||
addr >= base->GetStartAddress() +
addr < base->body() ||
addr >= base->body() +
(kNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
ASSERT_EQ(0,
static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_;
static_cast<int>(addr - base->body()) % table_entry_size_);
return static_cast<int>(addr - base->body()) / table_entry_size_;
}
@ -462,6 +488,8 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
}
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
if (bailout_type_ == OSR) {
DoComputeOsrOutputFrame();
@ -613,11 +641,13 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
intptr_t input_value = input_->GetRegister(input_reg);
if (FLAG_trace_deopt) {
PrintF(
" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
output_[frame_index]->GetTop() + output_offset,
output_offset,
input_value,
converter.NameOfCPURegister(input_reg));
reinterpret_cast<Object*>(input_value)->ShortPrint();
PrintF("\n");
}
output_[frame_index]->SetFrameSlot(output_offset, input_value);
return;
@ -675,10 +705,12 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
output_offset,
input_value,
input_offset);
reinterpret_cast<Object*>(input_value)->ShortPrint();
PrintF("\n");
}
output_[frame_index]->SetFrameSlot(output_offset, input_value);
return;
@ -953,7 +985,10 @@ void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
for (uint32_t i = 0; i < table_length; ++i) {
uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
PatchStackCheckCodeAt(pc_after, check_code, replacement_code);
PatchStackCheckCodeAt(unoptimized_code,
pc_after,
check_code,
replacement_code);
stack_check_cursor += 2 * kIntSize;
}
}
@ -1039,7 +1074,7 @@ void Deoptimizer::AddDoubleValue(intptr_t slot_address,
}
LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
// We cannot run this if the serializer is enabled because this will
// cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section
@ -1053,12 +1088,15 @@ LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0);
LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
MemoryChunk* chunk =
Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
EXECUTABLE,
NULL);
if (chunk == NULL) {
V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
}
memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
memcpy(chunk->body(), desc.buffer, desc.instr_size);
CPU::FlushICache(chunk->body(), desc.instr_size);
return chunk;
}

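Aside: GetDeoptimizationEntry and GetDeoptimizationId above stay exact inverses over a flat entry table; only the base pointer changes from LargeObjectChunk::GetStartAddress() to MemoryChunk::body(). A small check of that arithmetic with made-up sizes:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kTableEntrySize = 10;    // assumed; platform-specific in V8
  const intptr_t kNumberOfEntries = 4096;
  intptr_t base = 0x10000;                // stand-in for the chunk body address
  int id = 42;
  intptr_t addr = base + id * kTableEntrySize;         // GetDeoptimizationEntry
  assert(addr >= base && addr < base + kNumberOfEntries * kTableEntrySize);
  assert((addr - base) % kTableEntrySize == 0);
  assert((addr - base) / kTableEntrySize == id);        // GetDeoptimizationId
  return 0;
}
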
18 deps/v8/src/deoptimizer.h

@ -86,8 +86,8 @@ class DeoptimizerData {
#endif
private:
LargeObjectChunk* eager_deoptimization_entry_code_;
LargeObjectChunk* lazy_deoptimization_entry_code_;
MemoryChunk* eager_deoptimization_entry_code_;
MemoryChunk* lazy_deoptimization_entry_code_;
Deoptimizer* current_;
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -173,7 +173,8 @@ class Deoptimizer : public Malloced {
// Patch stack guard check at instruction before pc_after in
// the unoptimized code to unconditionally call replacement_code.
static void PatchStackCheckCodeAt(Address pc_after,
static void PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* replacement_code);
@ -211,6 +212,11 @@ class Deoptimizer : public Malloced {
return OFFSET_OF(Deoptimizer, output_count_);
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
static int frame_alignment_marker_offset() {
return OFFSET_OF(Deoptimizer, frame_alignment_marker_); }
static int has_alignment_padding_offset() {
return OFFSET_OF(Deoptimizer, has_alignment_padding_);
}
static int GetDeoptimizedCodeCount(Isolate* isolate);
@ -285,7 +291,7 @@ class Deoptimizer : public Malloced {
void AddDoubleValue(intptr_t slot_address, double value);
static LargeObjectChunk* CreateCode(BailoutType type);
static MemoryChunk* CreateCode(BailoutType type);
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
@ -315,6 +321,10 @@ class Deoptimizer : public Malloced {
// Array of output frame descriptions.
FrameDescription** output_;
// Frames can be dynamically padded on ia32 to align untagged doubles.
Object* frame_alignment_marker_;
intptr_t has_alignment_padding_;
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
static const int table_entry_size_;

2 deps/v8/src/disassembler.cc

@ -200,7 +200,7 @@ static int DecodeIt(FILE* f,
// Print all the reloc info for this instruction which are not comments.
for (int i = 0; i < pcs.length(); i++) {
// Put together the reloc info
RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], NULL);
// Indent the printing of the reloc info.
if (i == 0) {

11 deps/v8/src/elements.cc

@ -227,7 +227,9 @@ class FastElementsAccessor
public:
static MaybeObject* DeleteCommon(JSObject* obj,
uint32_t key) {
ASSERT(obj->HasFastElements() || obj->HasFastArgumentsElements());
ASSERT(obj->HasFastElements() ||
obj->HasFastSmiOnlyElements() ||
obj->HasFastArgumentsElements());
Heap* heap = obj->GetHeap();
FixedArray* backing_store = FixedArray::cast(obj->elements());
if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
@ -596,6 +598,9 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
void ElementsAccessor::InitializeOncePerProcess() {
static struct ConcreteElementsAccessors {
// Use the fast element handler for smi-only arrays. The implementation is
// currently identical.
FastElementsAccessor fast_smi_elements_handler;
FastElementsAccessor fast_elements_handler;
FastDoubleElementsAccessor fast_double_elements_handler;
DictionaryElementsAccessor dictionary_elements_handler;
@ -612,6 +617,7 @@ void ElementsAccessor::InitializeOncePerProcess() {
} element_accessors;
static ElementsAccessor* accessor_array[] = {
&element_accessors.fast_smi_elements_handler,
&element_accessors.fast_elements_handler,
&element_accessors.fast_double_elements_handler,
&element_accessors.dictionary_elements_handler,
@ -627,6 +633,9 @@ void ElementsAccessor::InitializeOncePerProcess() {
&element_accessors.pixel_elements_handler
};
STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
kElementsKindCount);
elements_accessors_ = accessor_array;
}

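Aside: the new STATIC_ASSERT above ties the accessor table length to kElementsKindCount, so adding an elements kind without a handler breaks the build. A generic sketch of the same guard with hypothetical kinds and C++11 static_assert in place of V8's macro:

#include <cstdio>

enum HypotheticalElementsKind {
  FAST_SMI_ONLY, FAST, FAST_DOUBLE, DICTIONARY, KIND_COUNT
};

static const char* kAccessorNames[] = { "smi-only", "fast", "double", "dictionary" };

static_assert(sizeof(kAccessorNames) / sizeof(*kAccessorNames) == KIND_COUNT,
              "one accessor per elements kind");

int main() {
  std::printf("%d kinds, %d accessors\n",
              static_cast<int>(KIND_COUNT),
              static_cast<int>(sizeof(kAccessorNames) / sizeof(*kAccessorNames)));
  return 0;
}
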
175 deps/v8/src/execution.cc

@ -33,6 +33,7 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
#include "isolate-inl.h"
#include "runtime-profiler.h"
#include "simulator.h"
#include "v8threads.h"
@ -65,13 +66,13 @@ void StackGuard::reset_limits(const ExecutionAccess& lock) {
}
static Handle<Object> Invoke(bool construct,
Handle<JSFunction> func,
static Handle<Object> Invoke(bool is_construct,
Handle<JSFunction> function,
Handle<Object> receiver,
int argc,
Object*** args,
Handle<Object> args[],
bool* has_pending_exception) {
Isolate* isolate = func->GetIsolate();
Isolate* isolate = function->GetIsolate();
// Entering JavaScript.
VMState state(isolate, JS);
@ -79,21 +80,15 @@ static Handle<Object> Invoke(bool construct,
// Placeholder for return value.
MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
typedef Object* (*JSEntryFunction)(
byte* entry,
Object* function,
Object* receiver,
int argc,
Object*** args);
Handle<Code> code;
if (construct) {
JSConstructEntryStub stub;
code = stub.GetCode();
} else {
JSEntryStub stub;
code = stub.GetCode();
}
typedef Object* (*JSEntryFunction)(byte* entry,
Object* function,
Object* receiver,
int argc,
Object*** args);
Handle<Code> code = is_construct
? isolate->factory()->js_construct_entry_code()
: isolate->factory()->js_entry_code();
// Convert calls on global objects to be calls on the global
// receiver instead to avoid having a 'this' pointer which refers
@ -105,21 +100,22 @@ static Handle<Object> Invoke(bool construct,
// Make sure that the global object of the context we're about to
// make the current one is indeed a global object.
ASSERT(func->context()->global()->IsGlobalObject());
ASSERT(function->context()->global()->IsGlobalObject());
{
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
SaveContext save(isolate);
NoHandleAllocation na;
JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
// Call the function through the right JS entry stub.
byte* entry_address = func->code()->entry();
JSFunction* function = *func;
Object* receiver_pointer = *receiver;
value = CALL_GENERATED_CODE(entry, entry_address, function,
receiver_pointer, argc, args);
byte* function_entry = function->code()->entry();
JSFunction* func = *function;
Object* recv = *receiver;
Object*** argv = reinterpret_cast<Object***>(args);
value =
CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
}
#ifdef DEBUG
@ -148,9 +144,11 @@ static Handle<Object> Invoke(bool construct,
Handle<Object> Execution::Call(Handle<Object> callable,
Handle<Object> receiver,
int argc,
Object*** args,
Handle<Object> argv[],
bool* pending_exception,
bool convert_receiver) {
*pending_exception = false;
if (!callable->IsJSFunction()) {
callable = TryGetFunctionDelegate(callable, pending_exception);
if (*pending_exception) return callable;
@ -172,13 +170,15 @@ Handle<Object> Execution::Call(Handle<Object> callable,
if (*pending_exception) return callable;
}
return Invoke(false, func, receiver, argc, args, pending_exception);
return Invoke(false, func, receiver, argc, argv, pending_exception);
}
Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
Object*** args, bool* pending_exception) {
return Invoke(true, func, Isolate::Current()->global(), argc, args,
Handle<Object> Execution::New(Handle<JSFunction> func,
int argc,
Handle<Object> argv[],
bool* pending_exception) {
return Invoke(true, func, Isolate::Current()->global(), argc, argv,
pending_exception);
}
@ -186,7 +186,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
Handle<Object> Execution::TryCall(Handle<JSFunction> func,
Handle<Object> receiver,
int argc,
Object*** args,
Handle<Object> args[],
bool* caught_exception) {
// Enter a try-block while executing the JavaScript code. To avoid
// duplicate error printing it must be non-verbose. Also, to avoid
@ -195,6 +195,7 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
v8::TryCatch catcher;
catcher.SetVerbose(false);
catcher.SetCaptureMessage(false);
*caught_exception = false;
Handle<Object> result = Invoke(false, func, receiver, argc, args,
caught_exception);
@ -377,7 +378,7 @@ void StackGuard::DisableInterrupts() {
bool StackGuard::IsInterrupted() {
ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & INTERRUPT;
return (thread_local_.interrupt_flags_ & INTERRUPT) != 0;
}
@ -403,7 +404,7 @@ void StackGuard::Preempt() {
bool StackGuard::IsTerminateExecution() {
ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & TERMINATE;
return (thread_local_.interrupt_flags_ & TERMINATE) != 0;
}
@ -416,7 +417,7 @@ void StackGuard::TerminateExecution() {
bool StackGuard::IsRuntimeProfilerTick() {
ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
}
@ -433,6 +434,22 @@ void StackGuard::RequestRuntimeProfilerTick() {
}
bool StackGuard::IsGCRequest() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
}
void StackGuard::RequestGC() {
ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= GC_REQUEST;
if (thread_local_.postpone_interrupts_nesting_ == 0) {
thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
isolate_->heap()->SetStackLimits();
}
}
#ifdef ENABLE_DEBUGGER_SUPPORT
bool StackGuard::IsDebugBreak() {
ExecutionAccess access(isolate_);
@ -555,14 +572,15 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
// --- C a l l s t o n a t i v e s ---
#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
do { \
Isolate* isolate = Isolate::Current(); \
Object** args[argc] = argv; \
ASSERT(has_pending_exception != NULL); \
return Call(isolate->name##_fun(), \
isolate->js_builtins_object(), argc, args, \
has_pending_exception); \
#define RETURN_NATIVE_CALL(name, args, has_pending_exception) \
do { \
Isolate* isolate = Isolate::Current(); \
Handle<Object> argv[] = args; \
ASSERT(has_pending_exception != NULL); \
return Call(isolate->name##_fun(), \
isolate->js_builtins_object(), \
ARRAY_SIZE(argv), argv, \
has_pending_exception); \
} while (false)
@ -583,44 +601,44 @@ Handle<Object> Execution::ToBoolean(Handle<Object> obj) {
Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_number, 1, { obj.location() }, exc);
RETURN_NATIVE_CALL(to_number, { obj }, exc);
}
Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_string, 1, { obj.location() }, exc);
RETURN_NATIVE_CALL(to_string, { obj }, exc);
}
Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_detail_string, 1, { obj.location() }, exc);
RETURN_NATIVE_CALL(to_detail_string, { obj }, exc);
}
Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
if (obj->IsSpecObject()) return obj;
RETURN_NATIVE_CALL(to_object, 1, { obj.location() }, exc);
RETURN_NATIVE_CALL(to_object, { obj }, exc);
}
Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_integer, 1, { obj.location() }, exc);
RETURN_NATIVE_CALL(to_integer, { obj }, exc);
}
Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_uint32, 1, { obj.location() }, exc);
RETURN_NATIVE_CALL(to_uint32, { obj }, exc);
}
Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_int32, 1, { obj.location() }, exc);
RETURN_NATIVE_CALL(to_int32, { obj }, exc);
}
Handle<Object> Execution::NewDate(double time, bool* exc) {
Handle<Object> time_obj = FACTORY->NewNumber(time);
RETURN_NATIVE_CALL(create_date, 1, { time_obj.location() }, exc);
RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
}
@ -657,7 +675,7 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
bool caught_exception;
Handle<Object> index_object = factory->NewNumberFromInt(int_index);
Object** index_arg[] = { index_object.location() };
Handle<Object> index_arg[] = { index_object };
Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
string,
ARRAY_SIZE(index_arg),
@ -671,7 +689,8 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
Handle<JSFunction> Execution::InstantiateFunction(
Handle<FunctionTemplateInfo> data, bool* exc) {
Handle<FunctionTemplateInfo> data,
bool* exc) {
Isolate* isolate = data->GetIsolate();
// Fast case: see if the function has already been instantiated
int serial_number = Smi::cast(data->serial_number())->value();
@ -680,10 +699,12 @@ Handle<JSFunction> Execution::InstantiateFunction(
GetElementNoExceptionThrown(serial_number);
if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
// The function has not yet been instantiated in this context; do it.
Object** args[1] = { Handle<Object>::cast(data).location() };
Handle<Object> result =
Call(isolate->instantiate_fun(),
isolate->js_builtins_object(), 1, args, exc);
Handle<Object> args[] = { data };
Handle<Object> result = Call(isolate->instantiate_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
exc);
if (*exc) return Handle<JSFunction>::null();
return Handle<JSFunction>::cast(result);
}
@ -710,10 +731,12 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
ASSERT(!*exc);
return Handle<JSObject>(JSObject::cast(result));
} else {
Object** args[1] = { Handle<Object>::cast(data).location() };
Handle<Object> result =
Call(isolate->instantiate_fun(),
isolate->js_builtins_object(), 1, args, exc);
Handle<Object> args[] = { data };
Handle<Object> result = Call(isolate->instantiate_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
exc);
if (*exc) return Handle<JSObject>::null();
return Handle<JSObject>::cast(result);
}
@ -724,9 +747,12 @@ void Execution::ConfigureInstance(Handle<Object> instance,
Handle<Object> instance_template,
bool* exc) {
Isolate* isolate = Isolate::Current();
Object** args[2] = { instance.location(), instance_template.location() };
Handle<Object> args[] = { instance, instance_template };
Execution::Call(isolate->configure_instance_fun(),
isolate->js_builtins_object(), 2, args, exc);
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
exc);
}
@ -735,16 +761,13 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
Handle<Object> pos,
Handle<Object> is_global) {
Isolate* isolate = fun->GetIsolate();
const int argc = 4;
Object** args[argc] = { recv.location(),
Handle<Object>::cast(fun).location(),
pos.location(),
is_global.location() };
bool caught_exception = false;
Handle<Object> result =
TryCall(isolate->get_stack_trace_line_fun(),
isolate->js_builtins_object(), argc, args,
&caught_exception);
Handle<Object> args[] = { recv, fun, pos, is_global };
bool caught_exception;
Handle<Object> result = TryCall(isolate->get_stack_trace_line_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
&caught_exception);
if (caught_exception || !result->IsString()) {
return isolate->factory()->empty_symbol();
}
@ -852,6 +875,12 @@ void Execution::ProcessDebugMesssages(bool debug_command_only) {
MaybeObject* Execution::HandleStackGuardInterrupt() {
Isolate* isolate = Isolate::Current();
StackGuard* stack_guard = isolate->stack_guard();
if (stack_guard->IsGCRequest()) {
isolate->heap()->CollectAllGarbage(false);
stack_guard->Continue(GC_REQUEST);
}
isolate->counters()->stack_interrupts()->Increment();
if (stack_guard->IsRuntimeProfilerTick()) {
isolate->counters()->runtime_profiler_ticks()->Increment();

13
deps/v8/src/execution.h

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -41,7 +41,8 @@ enum InterruptFlag {
DEBUGCOMMAND = 1 << 2,
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
RUNTIME_PROFILER_TICK = 1 << 5
RUNTIME_PROFILER_TICK = 1 << 5,
GC_REQUEST = 1 << 6
};
class Execution : public AllStatic {
@ -60,7 +61,7 @@ class Execution : public AllStatic {
static Handle<Object> Call(Handle<Object> callable,
Handle<Object> receiver,
int argc,
Object*** args,
Handle<Object> argv[],
bool* pending_exception,
bool convert_receiver = false);
@ -73,7 +74,7 @@ class Execution : public AllStatic {
//
static Handle<Object> New(Handle<JSFunction> func,
int argc,
Object*** args,
Handle<Object> argv[],
bool* pending_exception);
// Call a function, just like Call(), but make sure to silently catch
@ -83,7 +84,7 @@ class Execution : public AllStatic {
static Handle<Object> TryCall(Handle<JSFunction> func,
Handle<Object> receiver,
int argc,
Object*** args,
Handle<Object> argv[],
bool* caught_exception);
// ECMA-262 9.2
@ -196,6 +197,8 @@ class StackGuard {
bool IsDebugCommand();
void DebugCommand();
#endif
bool IsGCRequest();
void RequestGC();
void Continue(InterruptFlag after_what);
// This provides an asynchronous read of the stack limits for the current
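The InterruptFlag enum above encodes each pending interrupt as one bit, with GC_REQUEST as the newly added bit, and the .cc hunks switch the testers to an explicit `!= 0` comparison. A standalone sketch of that bit-flag pattern; the class below is a stand-in, not V8's StackGuard:

#include <cstdio>

enum InterruptFlag {
  INTERRUPT = 1 << 0, TERMINATE = 1 << 4,
  RUNTIME_PROFILER_TICK = 1 << 5, GC_REQUEST = 1 << 6
};

struct StackGuardSketch {
  int interrupt_flags_ = 0;
  void RequestGC() { interrupt_flags_ |= GC_REQUEST; }
  // The explicit != 0 matches the style change in IsInterrupted() and friends.
  bool IsGCRequest() const { return (interrupt_flags_ & GC_REQUEST) != 0; }
  void Continue(InterruptFlag f) { interrupt_flags_ &= ~f; }
};

int main() {
  StackGuardSketch guard;
  guard.RequestGC();
  std::printf("%d\n", guard.IsGCRequest());   // 1: request is pending
  guard.Continue(GC_REQUEST);
  std::printf("%d\n", guard.IsGCRequest());   // 0: cleared after handling
}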

7
deps/v8/src/extensions/gc-extension.cc

@ -40,12 +40,7 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
bool compact = false;
// All allocation spaces other than NEW_SPACE have the same effect.
if (args.Length() >= 1 && args[0]->IsBoolean()) {
compact = args[0]->BooleanValue();
}
HEAP->CollectAllGarbage(compact);
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
return v8::Undefined();
}

96
deps/v8/src/factory.cc

@ -234,7 +234,7 @@ Handle<String> Factory::NewProperSubString(Handle<String> str,
Handle<String> Factory::NewExternalStringFromAscii(
ExternalAsciiString::Resource* resource) {
const ExternalAsciiString::Resource* resource) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateExternalStringFromAscii(resource),
@ -243,7 +243,7 @@ Handle<String> Factory::NewExternalStringFromAscii(
Handle<String> Factory::NewExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource) {
const ExternalTwoByteString::Resource* resource) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateExternalStringFromTwoByte(resource),
@ -404,10 +404,12 @@ Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
}
Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
Handle<Map> Factory::NewMap(InstanceType type,
int instance_size,
ElementsKind elements_kind) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateMap(type, instance_size),
isolate()->heap()->AllocateMap(type, instance_size, elements_kind),
Map);
}
@ -455,23 +457,11 @@ Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
}
Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
CALL_HEAP_FUNCTION(isolate(), src->GetFastElementsMap(), Map);
}
Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
CALL_HEAP_FUNCTION(isolate(), src->GetSlowElementsMap(), Map);
}
Handle<Map> Factory::GetElementsTransitionMap(
Handle<Map> src,
ElementsKind elements_kind,
bool safe_to_add_transition) {
Handle<JSObject> src,
ElementsKind elements_kind) {
CALL_HEAP_FUNCTION(isolate(),
src->GetElementsTransitionMap(elements_kind,
safe_to_add_transition),
src->GetElementsTransitionMap(elements_kind),
Map);
}
@ -641,14 +631,16 @@ Handle<Object> Factory::NewError(const char* maker,
return undefined_value();
Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
Handle<Object> type_obj = LookupAsciiSymbol(type);
Object** argv[2] = { type_obj.location(),
Handle<Object>::cast(args).location() };
Handle<Object> argv[] = { type_obj, args };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
bool caught_exception;
Handle<Object> result = Execution::TryCall(fun,
isolate()->js_builtins_object(), 2, argv, &caught_exception);
isolate()->js_builtins_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
return result;
}
@ -664,13 +656,16 @@ Handle<Object> Factory::NewError(const char* constructor,
Handle<JSFunction> fun = Handle<JSFunction>(
JSFunction::cast(isolate()->js_builtins_object()->
GetPropertyNoExceptionThrown(*constr)));
Object** argv[1] = { Handle<Object>::cast(message).location() };
Handle<Object> argv[] = { message };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
bool caught_exception;
Handle<Object> result = Execution::TryCall(fun,
isolate()->js_builtins_object(), 1, argv, &caught_exception);
isolate()->js_builtins_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
return result;
}
@ -722,7 +717,12 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
if (force_initial_map ||
type != JS_OBJECT_TYPE ||
instance_size != JSObject::kHeaderSize) {
Handle<Map> initial_map = NewMap(type, instance_size);
ElementsKind default_elements_kind = FLAG_smi_only_arrays
? FAST_SMI_ONLY_ELEMENTS
: FAST_ELEMENTS;
Handle<Map> initial_map = NewMap(type,
instance_size,
default_elements_kind);
function->set_initial_map(*initial_map);
initial_map->set_constructor(*function);
}
@ -908,11 +908,26 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
Handle<JSArray> result =
Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
pretenure));
result->SetContent(*elements);
SetContent(result, elements);
return result;
}
void Factory::SetContent(Handle<JSArray> array,
Handle<FixedArray> elements) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
array->SetContent(*elements));
}
void Factory::EnsureCanContainNonSmiElements(Handle<JSArray> array) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
array->EnsureCanContainNonSmiElements());
}
Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
Handle<Object> prototype) {
CALL_HEAP_FUNCTION(
@ -938,6 +953,13 @@ void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
}
void Factory::SetIdentityHash(Handle<JSObject> object, Object* hash) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
object->SetIdentityHash(hash, ALLOW_CREATION));
}
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name,
int number_of_literals,
@ -990,6 +1012,12 @@ Handle<String> Factory::NumberToString(Handle<Object> number) {
}
Handle<String> Factory::Uint32ToString(uint32_t value) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->Uint32ToString(value), String);
}
Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
Handle<NumberDictionary> dictionary,
uint32_t key,
@ -1299,4 +1327,20 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
}
Handle<Object> Factory::GlobalConstantFor(Handle<String> name) {
Heap* h = isolate()->heap();
if (name->Equals(h->undefined_symbol())) return undefined_value();
if (name->Equals(h->nan_symbol())) return nan_value();
if (name->Equals(h->infinity_symbol())) return infinity_value();
return Handle<Object>::null();
}
Handle<Object> Factory::ToBoolean(bool value) {
return Handle<Object>(value
? isolate()->heap()->true_value()
: isolate()->heap()->false_value());
}
} } // namespace v8::internal
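The factory.cc call sites above build the argument array inline and pass ARRAY_SIZE(argv) instead of a hand-written count. A sketch of how such a compile-time element count can be obtained; the template below is a hypothetical equivalent of the macro, not its actual definition:

#include <cstdio>
#include <cstddef>

template <typename T, std::size_t N>
constexpr std::size_t ArraySize(const T (&)[N]) { return N; }

int main() {
  const char* argv[] = { "type_obj", "args" };
  // The count tracks the initializer automatically, so adding an argument can
  // never silently disagree with a hand-written argc, which is the bug class
  // the hunks above are removing.
  std::printf("%zu\n", ArraySize(argv));  // prints 2
}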

32
deps/v8/src/factory.h

@ -145,9 +145,9 @@ class Factory {
// not make sense to have a UTF-8 factory function for external strings,
// because we cannot change the underlying buffer.
Handle<String> NewExternalStringFromAscii(
ExternalAsciiString::Resource* resource);
const ExternalAsciiString::Resource* resource);
Handle<String> NewExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource);
const ExternalTwoByteString::Resource* resource);
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewGlobalContext();
@ -203,7 +203,9 @@ class Factory {
Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
Handle<Object> value);
Handle<Map> NewMap(InstanceType type, int instance_size);
Handle<Map> NewMap(InstanceType type,
int instance_size,
ElementsKind elements_kind = FAST_ELEMENTS);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@ -215,13 +217,8 @@ class Factory {
Handle<Map> CopyMapDropTransitions(Handle<Map> map);
Handle<Map> GetFastElementsMap(Handle<Map> map);
Handle<Map> GetSlowElementsMap(Handle<Map> map);
Handle<Map> GetElementsTransitionMap(Handle<Map> map,
ElementsKind elements_kind,
bool safe_to_add_transition);
Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
ElementsKind elements_kind);
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
@ -258,12 +255,18 @@ class Factory {
Handle<FixedArray> elements,
PretenureFlag pretenure = NOT_TENURED);
void SetContent(Handle<JSArray> array, Handle<FixedArray> elements);
void EnsureCanContainNonSmiElements(Handle<JSArray> array);
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
// Change the type of the argument into a JS object/function and reinitialize.
void BecomeJSObject(Handle<JSReceiver> object);
void BecomeJSFunction(Handle<JSReceiver> object);
void SetIdentityHash(Handle<JSObject> object, Object* hash);
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Object> prototype);
@ -356,6 +359,7 @@ class Factory {
PropertyAttributes attributes);
Handle<String> NumberToString(Handle<Object> number);
Handle<String> Uint32ToString(uint32_t value);
enum ApiInstanceType {
JavaScriptObject,
@ -442,6 +446,14 @@ class Factory {
JSRegExp::Flags flags,
int capture_count);
// Returns the value for a known global constant (a property of the global
// object which is neither configurable nor writable) like 'undefined'.
// Returns a null handle when the given name is unknown.
Handle<Object> GlobalConstantFor(Handle<String> name);
// Converts the given boolean condition to JavaScript boolean value.
Handle<Object> ToBoolean(bool value);
private:
Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }

24
deps/v8/src/flag-definitions.h

@ -104,6 +104,7 @@ DEFINE_bool(harmony_block_scoping, false, "enable harmony block scoping")
// Flags for experimental implementation features.
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_bool(smi_only_arrays, false, "tracks arrays with only smi values")
DEFINE_bool(string_slices, false, "use string slices")
// Flags for Crankshaft.
@ -253,10 +254,16 @@ DEFINE_bool(print_cumulative_gc_stat, false,
"print cumulative GC statistics in name=value format on exit")
DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_bool(trace_fragmentation, false,
"report fragmentation for old pointer and data pages")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
DEFINE_bool(flush_code, true,
"flush code that we expect not to use again before full gc")
DEFINE_bool(incremental_marking, true, "use incremental marking")
DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
DEFINE_bool(trace_incremental_marking, false,
"trace progress of the incremental marking")
// v8.cc
DEFINE_bool(use_idle_notification, true,
@ -276,8 +283,13 @@ DEFINE_bool(native_code_counters, false,
// mark-compact.cc
DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
DEFINE_bool(lazy_sweeping, true,
"Use lazy sweeping for old pointer and data spaces")
DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
"Flush code caches in maps during mark compact cycle.")
DEFINE_bool(never_compact, false,
"Never perform compaction on full GC - testing only")
DEFINE_bool(compact_code_space, false, "Compact code space")
DEFINE_bool(cleanup_code_caches_at_gc, true,
"Flush inline caches prior to mark compact collection and "
"flush code caches in maps during mark compact cycle.")
@ -348,11 +360,15 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes",
DEFINE_bool(help, false, "Print usage message, including flags, on console")
DEFINE_bool(dump_counters, false, "Dump counters on exit")
#ifdef ENABLE_DEBUGGER_SUPPORT
DEFINE_bool(debugger, false, "Enable JavaScript debugger")
DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
"debugger agent in another process")
DEFINE_bool(debugger_agent, false, "Enable debugger agent")
DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
#endif // ENABLE_DEBUGGER_SUPPORT
DEFINE_string(map_counters, "", "Map counters to a file")
DEFINE_args(js_arguments, JSArguments(),
"Pass all remaining arguments to the script. Alias for \"--\".")
@ -425,6 +441,11 @@ DEFINE_bool(print_global_handles, false, "report global handles after GC")
// ic.cc
DEFINE_bool(trace_ic, false, "trace inline cache state transitions")
// mark-compact.cc
DEFINE_bool(force_marking_deque_overflows, false,
"force overflows of marking deque by reducing it's size "
"to 64 words")
// objects.cc
DEFINE_bool(trace_normalization,
false,
@ -444,6 +465,9 @@ DEFINE_bool(collect_heap_spill_statistics, false,
DEFINE_bool(trace_isolates, false, "trace isolate state changes")
DEFINE_bool(trace_live_byte_count, false,
"trace updates to page live byte count")
// VM state
DEFINE_bool(log_state_changes, false, "Log state changes.")
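The flag-definitions.h hunks above add boolean flags such as incremental_marking via the DEFINE_bool macro, which pairs a default value with a description string. One common way such a flag macro can expand, shown as a hypothetical sketch rather than V8's actual expansion:

#include <cstdio>

#define DEFINE_BOOL_SKETCH(name, default_value, comment) \
  bool FLAG_##name = default_value;                      \
  const char* FLAG_##name##_comment = comment;

DEFINE_BOOL_SKETCH(incremental_marking, true, "use incremental marking")

int main() {
  std::printf("%d %s\n", FLAG_incremental_marking, FLAG_incremental_marking_comment);
}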

67
deps/v8/src/frames-inl.h

@ -77,6 +77,21 @@ inline StackHandler* StackHandler::FromAddress(Address address) {
}
inline bool StackHandler::is_entry() const {
return state() == ENTRY;
}
inline bool StackHandler::is_try_catch() const {
return state() == TRY_CATCH;
}
inline bool StackHandler::is_try_finally() const {
return state() == TRY_FINALLY;
}
inline StackHandler::State StackHandler::state() const {
const int offset = StackHandlerConstants::kStateOffset;
return static_cast<State>(Memory::int_at(address() + offset));
@ -105,8 +120,33 @@ inline StackHandler* StackFrame::top_handler() const {
}
inline Code* StackFrame::LookupCode() const {
return GetContainingCode(isolate(), pc());
}
inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
return isolate->pc_to_code_cache()->GetCacheEntry(pc)->code;
return isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
}
inline EntryFrame::EntryFrame(StackFrameIterator* iterator)
: StackFrame(iterator) {
}
inline EntryConstructFrame::EntryConstructFrame(StackFrameIterator* iterator)
: EntryFrame(iterator) {
}
inline ExitFrame::ExitFrame(StackFrameIterator* iterator)
: StackFrame(iterator) {
}
inline StandardFrame::StandardFrame(StackFrameIterator* iterator)
: StackFrame(iterator) {
}
@ -155,6 +195,11 @@ inline bool StandardFrame::IsConstructFrame(Address fp) {
}
inline JavaScriptFrame::JavaScriptFrame(StackFrameIterator* iterator)
: StandardFrame(iterator) {
}
Address JavaScriptFrame::GetParameterSlot(int index) const {
int param_count = ComputeParametersCount();
ASSERT(-1 <= index && index < param_count);
@ -190,6 +235,26 @@ inline Object* JavaScriptFrame::function() const {
}
inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
: JavaScriptFrame(iterator) {
}
inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
}
inline InternalFrame::InternalFrame(StackFrameIterator* iterator)
: StandardFrame(iterator) {
}
inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator)
: InternalFrame(iterator) {
}
template<typename Iterator>
inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
Isolate* isolate)
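The new inline StackHandler testers above ultimately read an int stored at a fixed byte offset from the handler's address (Memory::int_at(address() + offset)). A portable standalone version of that field-at-offset access pattern; the buffer and offset here are illustrative:

#include <cstdio>
#include <cstring>

enum State { ENTRY, TRY_CATCH, TRY_FINALLY };

static int IntAt(const unsigned char* base, int offset) {
  int value;
  std::memcpy(&value, base + offset, sizeof(value));  // avoids unaligned/aliasing issues
  return value;
}

int main() {
  unsigned char frame_slot[8] = {0};
  int state = TRY_CATCH;
  std::memcpy(frame_slot + 4, &state, sizeof(state));      // write at offset 4
  std::printf("%d\n", IntAt(frame_slot, 4) == TRY_CATCH);  // prints 1
}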

117
deps/v8/src/frames.cc

@ -366,16 +366,17 @@ void SafeStackTraceFrameIterator::Advance() {
Code* StackFrame::GetSafepointData(Isolate* isolate,
Address pc,
Address inner_pointer,
SafepointEntry* safepoint_entry,
unsigned* stack_slots) {
PcToCodeCache::PcToCodeCacheEntry* entry =
isolate->pc_to_code_cache()->GetCacheEntry(pc);
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
isolate->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
if (!entry->safepoint_entry.is_valid()) {
entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer);
ASSERT(entry->safepoint_entry.is_valid());
} else {
ASSERT(entry->safepoint_entry.Equals(entry->code->GetSafepointEntry(pc)));
ASSERT(entry->safepoint_entry.Equals(
entry->code->GetSafepointEntry(inner_pointer)));
}
// Fill in the results and return the code.
@ -392,11 +393,16 @@ bool StackFrame::HasHandler() const {
}
#ifdef DEBUG
static bool GcSafeCodeContains(HeapObject* object, Address addr);
#endif
void StackFrame::IteratePc(ObjectVisitor* v,
Address* pc_address,
Code* holder) {
Address pc = *pc_address;
ASSERT(holder->contains(pc));
ASSERT(GcSafeCodeContains(holder, pc));
unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
Object* code = holder;
v->VisitPointer(&code);
@ -819,7 +825,8 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
// back to a slow search in this case to find the original optimized
// code object.
if (!code->contains(pc())) {
code = isolate()->pc_to_code_cache()->GcSafeFindCodeForPc(pc());
code = isolate()->inner_pointer_to_code_cache()->
GcSafeFindCodeForInnerPointer(pc());
}
ASSERT(code != NULL);
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
@ -881,6 +888,11 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
}
int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
return Smi::cast(GetExpression(0))->value();
}
Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
return fp() + StandardFrameConstants::kCallerSPOffset;
}
@ -1155,52 +1167,89 @@ JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
// -------------------------------------------------------------------------
Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
static Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
MapWord map_word = object->map_word();
return map_word.IsForwardingAddress() ?
map_word.ToForwardingAddress()->map() : map_word.ToMap();
}
static int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
}
#ifdef DEBUG
static bool GcSafeCodeContains(HeapObject* code, Address addr) {
Map* map = GcSafeMapOfCodeSpaceObject(code);
ASSERT(map == code->GetHeap()->code_map());
Address start = code->address();
Address end = code->address() + code->SizeFromMap(map);
return start <= addr && addr < end;
}
#endif
Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object,
Address inner_pointer) {
Code* code = reinterpret_cast<Code*>(object);
ASSERT(code != NULL && code->contains(pc));
ASSERT(code != NULL && GcSafeCodeContains(code, inner_pointer));
return code;
}
Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
Address inner_pointer) {
Heap* heap = isolate_->heap();
// Check if the pc points into a large object chunk.
LargeObjectChunk* chunk = heap->lo_space()->FindChunkContainingPc(pc);
if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
// Iterate through the 8K page until we reach the end or find an
// object starting after the pc.
Page* page = Page::FromAddress(pc);
HeapObjectIterator iterator(page, heap->GcSafeSizeOfOldObjectFunction());
HeapObject* previous = NULL;
// Check if the inner pointer points into a large object chunk.
LargePage* large_page = heap->lo_space()->FindPageContainingPc(inner_pointer);
if (large_page != NULL) {
return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
}
// Iterate through the page until we reach the end or find an object starting
// after the inner pointer.
Page* page = Page::FromAddress(inner_pointer);
Address addr = page->skip_list()->StartFor(inner_pointer);
Address top = heap->code_space()->top();
Address limit = heap->code_space()->limit();
while (true) {
HeapObject* next = iterator.next();
if (next == NULL || next->address() >= pc) {
return GcSafeCastToCode(previous, pc);
if (addr == top && addr != limit) {
addr = limit;
continue;
}
previous = next;
HeapObject* obj = HeapObject::FromAddress(addr);
int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
Address next_addr = addr + obj_size;
if (next_addr > inner_pointer) return GcSafeCastToCode(obj, inner_pointer);
addr = next_addr;
}
}
PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
isolate_->counters()->pc_to_code()->Increment();
ASSERT(IsPowerOf2(kPcToCodeCacheSize));
ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize));
uint32_t hash = ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
uint32_t index = hash & (kPcToCodeCacheSize - 1);
PcToCodeCacheEntry* entry = cache(index);
if (entry->pc == pc) {
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)));
uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
InnerPointerToCodeCacheEntry* entry = cache(index);
if (entry->inner_pointer == inner_pointer) {
isolate_->counters()->pc_to_code_cached()->Increment();
ASSERT(entry->code == GcSafeFindCodeForPc(pc));
ASSERT(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer));
} else {
// Because this code may be interrupted by a profiling signal that
// also queries the cache, we cannot update pc before the code has
// been set. Otherwise, we risk trying to use a cache entry before
// also queries the cache, we cannot update inner_pointer before the code
// has been set. Otherwise, we risk trying to use a cache entry before
// the code has been computed.
entry->code = GcSafeFindCodeForPc(pc);
entry->code = GcSafeFindCodeForInnerPointer(inner_pointer);
entry->safepoint_entry.Reset();
entry->pc = pc;
entry->inner_pointer = inner_pointer;
}
return entry;
}
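InnerPointerToCodeCache::GetCacheEntry above is a direct-mapped cache: the table size is a power of two, the slot is picked with hash & (size - 1), and on a miss the code is filled in before the key so a profiling-signal reader never pairs a fresh key with a stale value. A tiny standalone cache in the same style; everything below is a sketch, not the V8 class:

#include <cstdio>
#include <cstdint>

struct Entry { uintptr_t key; int value; };

static const int kCacheSize = 1024;                 // must stay a power of two
static Entry cache[kCacheSize];

static int SlowLookup(uintptr_t key) { return static_cast<int>(key % 97); }

static int Lookup(uintptr_t key) {
  uint32_t hash = static_cast<uint32_t>(key * 2654435761u);  // cheap integer mix
  Entry* e = &cache[hash & (kCacheSize - 1)];
  if (e->key != key) {
    e->value = SlowLookup(key);   // value first...
    e->key = key;                 // ...then the key that makes it visible
  }
  return e->value;
}

int main() {
  std::printf("%d %d\n", Lookup(12345), Lookup(12345));  // second call hits the cache
}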

78
deps/v8/src/frames.h

@ -49,36 +49,36 @@ class StackFrameIterator;
class ThreadLocalTop;
class Isolate;
class PcToCodeCache {
class InnerPointerToCodeCache {
public:
struct PcToCodeCacheEntry {
Address pc;
struct InnerPointerToCodeCacheEntry {
Address inner_pointer;
Code* code;
SafepointEntry safepoint_entry;
};
explicit PcToCodeCache(Isolate* isolate) : isolate_(isolate) {
explicit InnerPointerToCodeCache(Isolate* isolate) : isolate_(isolate) {
Flush();
}
Code* GcSafeFindCodeForPc(Address pc);
Code* GcSafeCastToCode(HeapObject* object, Address pc);
Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
Code* GcSafeCastToCode(HeapObject* object, Address inner_pointer);
void Flush() {
memset(&cache_[0], 0, sizeof(cache_));
}
PcToCodeCacheEntry* GetCacheEntry(Address pc);
InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer);
private:
PcToCodeCacheEntry* cache(int index) { return &cache_[index]; }
InnerPointerToCodeCacheEntry* cache(int index) { return &cache_[index]; }
Isolate* isolate_;
static const int kPcToCodeCacheSize = 1024;
PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
static const int kInnerPointerToCodeCacheSize = 1024;
InnerPointerToCodeCacheEntry cache_[kInnerPointerToCodeCacheSize];
DISALLOW_COPY_AND_ASSIGN(PcToCodeCache);
DISALLOW_COPY_AND_ASSIGN(InnerPointerToCodeCache);
};
@ -106,9 +106,9 @@ class StackHandler BASE_EMBEDDED {
static inline StackHandler* FromAddress(Address address);
// Testers
bool is_entry() { return state() == ENTRY; }
bool is_try_catch() { return state() == TRY_CATCH; }
bool is_try_finally() { return state() == TRY_FINALLY; }
inline bool is_entry() const;
inline bool is_try_catch() const;
inline bool is_try_finally() const;
private:
// Accessors.
@ -139,7 +139,10 @@ class StackFrame BASE_EMBEDDED {
enum Type {
NONE = 0,
STACK_FRAME_TYPE_LIST(DECLARE_TYPE)
NUMBER_OF_TYPES
NUMBER_OF_TYPES,
// Used by FrameScope to indicate that the stack frame is constructed
// manually and the FrameScope does not need to emit code.
MANUAL
};
#undef DECLARE_TYPE
@ -215,9 +218,7 @@ class StackFrame BASE_EMBEDDED {
virtual Code* unchecked_code() const = 0;
// Get the code associated with this frame.
Code* LookupCode() const {
return GetContainingCode(isolate(), pc());
}
inline Code* LookupCode() const;
// Get the code object that contains the given pc.
static inline Code* GetContainingCode(Isolate* isolate, Address pc);
@ -299,7 +300,7 @@ class EntryFrame: public StackFrame {
virtual void SetCallerFp(Address caller_fp);
protected:
explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
inline explicit EntryFrame(StackFrameIterator* iterator);
// The caller stack pointer for entry frames is always zero. The
// real information about the caller frame is available through the
@ -326,8 +327,7 @@ class EntryConstructFrame: public EntryFrame {
}
protected:
explicit EntryConstructFrame(StackFrameIterator* iterator)
: EntryFrame(iterator) { }
inline explicit EntryConstructFrame(StackFrameIterator* iterator);
private:
friend class StackFrameIterator;
@ -361,7 +361,7 @@ class ExitFrame: public StackFrame {
static void FillState(Address fp, Address sp, State* state);
protected:
explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
inline explicit ExitFrame(StackFrameIterator* iterator);
virtual Address GetCallerStackPointer() const;
@ -394,8 +394,7 @@ class StandardFrame: public StackFrame {
}
protected:
explicit StandardFrame(StackFrameIterator* iterator)
: StackFrame(iterator) { }
inline explicit StandardFrame(StackFrameIterator* iterator);
virtual void ComputeCallerState(State* state) const;
@ -514,8 +513,7 @@ class JavaScriptFrame: public StandardFrame {
}
protected:
explicit JavaScriptFrame(StackFrameIterator* iterator)
: StandardFrame(iterator) { }
inline explicit JavaScriptFrame(StackFrameIterator* iterator);
virtual Address GetCallerStackPointer() const;
@ -552,8 +550,7 @@ class OptimizedFrame : public JavaScriptFrame {
DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
protected:
explicit OptimizedFrame(StackFrameIterator* iterator)
: JavaScriptFrame(iterator) { }
inline explicit OptimizedFrame(StackFrameIterator* iterator);
private:
friend class StackFrameIterator;
@ -581,12 +578,9 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
int index) const;
protected:
explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
: JavaScriptFrame(iterator) { }
inline explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator);
virtual int GetNumberOfIncomingArguments() const {
return Smi::cast(GetExpression(0))->value();
}
virtual int GetNumberOfIncomingArguments() const;
virtual Address GetCallerStackPointer() const;
@ -611,8 +605,7 @@ class InternalFrame: public StandardFrame {
}
protected:
explicit InternalFrame(StackFrameIterator* iterator)
: StandardFrame(iterator) { }
inline explicit InternalFrame(StackFrameIterator* iterator);
virtual Address GetCallerStackPointer() const;
@ -633,8 +626,7 @@ class ConstructFrame: public InternalFrame {
}
protected:
explicit ConstructFrame(StackFrameIterator* iterator)
: InternalFrame(iterator) { }
inline explicit ConstructFrame(StackFrameIterator* iterator);
private:
friend class StackFrameIterator;
@ -715,15 +707,19 @@ class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
JavaScriptFrameIteratorTemp(Address fp, Address sp,
Address low_bound, Address high_bound) :
JavaScriptFrameIteratorTemp(Address fp,
Address sp,
Address low_bound,
Address high_bound) :
iterator_(fp, sp, low_bound, high_bound) {
if (!done()) Advance();
}
JavaScriptFrameIteratorTemp(Isolate* isolate,
Address fp, Address sp,
Address low_bound, Address high_bound) :
Address fp,
Address sp,
Address low_bound,
Address high_bound) :
iterator_(isolate, fp, sp, low_bound, high_bound) {
if (!done()) Advance();
}

41
deps/v8/src/full-codegen.cc

@ -244,11 +244,6 @@ void BreakableStatementChecker::VisitBinaryOperation(BinaryOperation* expr) {
}
void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) {
Visit(expr->expression());
}
void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
Visit(expr->left());
Visit(expr->right());
@ -291,8 +286,10 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_optimizable(info->IsOptimizable());
cgen.PopulateDeoptimizationData(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
#ifdef ENABLE_DEBUGGER_SUPPORT
code->set_has_debug_break_slots(
info->isolate()->debugger()->IsDebuggerActive());
#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0);
code->set_stack_check_table_offset(table_offset);
CodeGenerator::PrintCode(code, info);
@ -823,9 +820,19 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
if (stmt->block_scope() != NULL) {
{ Comment cmnt(masm_, "[ Extend block context");
scope_ = stmt->block_scope();
__ Push(scope_->GetSerializedScopeInfo());
Handle<SerializedScopeInfo> scope_info = scope_->GetSerializedScopeInfo();
int heap_slots =
scope_info->NumberOfContextSlots() - Context::MIN_CONTEXT_SLOTS;
__ Push(scope_info);
PushFunctionArgumentForContextAllocation();
__ CallRuntime(Runtime::kPushBlockContext, 2);
if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
FastNewBlockContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kPushBlockContext, 2);
}
// Replace the context stored in the frame.
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
@ -1321,19 +1328,21 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryCatch::Exit(
}
bool FullCodeGenerator::TryLiteralCompare(CompareOperation* compare,
Label* if_true,
Label* if_false,
Label* fall_through) {
Expression *expr;
bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
Expression *sub_expr;
Handle<String> check;
if (compare->IsLiteralCompareTypeof(&expr, &check)) {
EmitLiteralCompareTypeof(expr, check, if_true, if_false, fall_through);
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
EmitLiteralCompareTypeof(sub_expr, check);
return true;
}
if (expr->IsLiteralCompareUndefined(&sub_expr)) {
EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
return true;
}
if (compare->IsLiteralCompareUndefined(&expr)) {
EmitLiteralCompareUndefined(expr, if_true, if_false, fall_through);
if (expr->IsLiteralCompareNull(&sub_expr)) {
EmitLiteralCompareNil(expr, sub_expr, kNullValue);
return true;
}
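TryLiteralCompare above checks, in order, for "typeof x == 'literal'", a comparison against undefined, and a comparison against null, and emits a specialized path when one matches, falling back to the generic compare otherwise. A standalone sketch of that dispatch over a toy expression type; the Expr struct is illustrative, not V8's AST:

#include <cstdio>
#include <string>

struct Expr {
  enum Kind { TYPEOF_VS_STRING, VS_UNDEFINED, VS_NULL, GENERIC } kind;
  std::string literal;  // only used for TYPEOF_VS_STRING
};

static bool TryLiteralCompare(const Expr& e) {
  switch (e.kind) {
    case Expr::TYPEOF_VS_STRING:
      std::printf("emit typeof check against \"%s\"\n", e.literal.c_str());
      return true;
    case Expr::VS_UNDEFINED:
      std::printf("emit compare-to-undefined\n");
      return true;
    case Expr::VS_NULL:
      std::printf("emit compare-to-null\n");
      return true;
    default:
      return false;  // fall back to the generic compare path
  }
}

int main() {
  Expr e = { Expr::TYPEOF_VS_STRING, "string" };
  if (!TryLiteralCompare(e)) std::printf("generic compare\n");
}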

23
deps/v8/src/full-codegen.h

@ -391,25 +391,16 @@ class FullCodeGenerator: public AstVisitor {
// Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operations
// has been matched and all code generated; false otherwise.
bool TryLiteralCompare(CompareOperation* compare,
Label* if_true,
Label* if_false,
Label* fall_through);
bool TryLiteralCompare(CompareOperation* compare);
// Platform-specific code for comparing the type of a value with
// a given literal string.
void EmitLiteralCompareTypeof(Expression* expr,
Handle<String> check,
Label* if_true,
Label* if_false,
Label* fall_through);
// Platform-specific code for strict equality comparison with
// the undefined value.
void EmitLiteralCompareUndefined(Expression* expr,
Label* if_true,
Label* if_false,
Label* fall_through);
void EmitLiteralCompareTypeof(Expression* expr, Handle<String> check);
// Platform-specific code for equality comparison with a nil-like value.
void EmitLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil);
// Bailout support.
void PrepareForBailout(Expression* node, State state);

6
deps/v8/src/func-name-inferrer.h

@ -70,6 +70,12 @@ class FuncNameInferrer : public ZoneObject {
}
}
void RemoveLastFunction() {
if (IsOpen() && !funcs_to_infer_.is_empty()) {
funcs_to_infer_.RemoveLast();
}
}
// Infers a function name and leaves names collection state.
void Infer() {
ASSERT(IsOpen());

4
deps/v8/src/globals.h

@ -255,6 +255,10 @@ const int kBinary32MinExponent = 0x01;
const int kBinary32MantissaBits = 23;
const int kBinary32ExponentShift = 23;
// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
// other bits set.
const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
// ASCII/UC16 constants
// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
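kQuietNaNMask above covers bits 51 to 62, i.e. 0xfff shifted left by 51. A quick standalone check that the standard quiet NaN sets exactly those bits (the 11 exponent bits plus the top, quiet, mantissa bit) on a typical IEEE-754 double:

#include <cstdio>
#include <cstdint>
#include <cstring>
#include <limits>

int main() {
  const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
  double nan = std::numeric_limits<double>::quiet_NaN();
  uint64_t bits;
  std::memcpy(&bits, &nan, sizeof(bits));   // reinterpret the double's bit pattern
  std::printf("%d\n", (bits & kQuietNaNMask) == kQuietNaNMask);  // prints 1
}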

13
deps/v8/src/handles.cc

@ -421,17 +421,18 @@ Handle<Object> PreventExtensions(Handle<JSObject> object) {
}
Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
JSObject::HiddenPropertiesFlag flag) {
Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
Handle<String> key,
Handle<Object> value) {
CALL_HEAP_FUNCTION(obj->GetIsolate(),
obj->GetHiddenProperties(flag),
obj->SetHiddenProperty(*key, *value),
Object);
}
int GetIdentityHash(Handle<JSObject> obj) {
int GetIdentityHash(Handle<JSReceiver> obj) {
CALL_AND_RETRY(obj->GetIsolate(),
obj->GetIdentityHash(JSObject::ALLOW_CREATION),
obj->GetIdentityHash(ALLOW_CREATION),
return Smi::cast(__object__)->value(),
return 0);
}
@ -886,7 +887,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
Handle<JSObject> key,
Handle<JSReceiver> key,
Handle<Object> value) {
CALL_HEAP_FUNCTION(table->GetIsolate(),
table->Put(*key, *value),
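The handles.cc helpers above wrap raw heap calls that may fail and need to be retried after a garbage collection (the CALL_HEAP_FUNCTION and CALL_AND_RETRY macros), with GetIdentityHash supplying an explicit fallback. A generic sketch of that retry shape with toy stand-ins for the heap call; it is not the actual macros:

#include <cstdio>

static bool gc_ran = false;
static void CollectGarbageSketch() { gc_ran = true; }

// Fails until a collection has run, then succeeds.
static bool TryAllocate(int* out) {
  if (!gc_ran) return false;
  *out = 42;
  return true;
}

static int AllocateWithRetry(int fallback) {
  int result;
  if (TryAllocate(&result)) return result;   // fast path
  CollectGarbageSketch();                    // free some space...
  if (TryAllocate(&result)) return result;   // ...and retry once
  return fallback;                           // mirrors the "return 0" fallback above
}

int main() {
  std::printf("%d\n", AllocateWithRetry(0));  // prints 42
}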

15
deps/v8/src/handles.h

@ -263,14 +263,13 @@ Handle<Object> GetPrototype(Handle<Object> obj);
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
// Return the object's hidden properties object. If the object has no hidden
// properties and HiddenPropertiesFlag::ALLOW_CREATION is passed, then a new
// hidden property object will be allocated. Otherwise Heap::undefined_value
// is returned.
Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
JSObject::HiddenPropertiesFlag flag);
// Sets a hidden property on an object. Returns obj on success, undefined
// if trying to set the property on a detached proxy.
Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
Handle<String> key,
Handle<Object> value);
int GetIdentityHash(Handle<JSObject> obj);
int GetIdentityHash(Handle<JSReceiver> obj);
Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
@ -348,7 +347,7 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> PreventExtensions(Handle<JSObject> object);
Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
Handle<JSObject> key,
Handle<JSReceiver> key,
Handle<Object> value);
// Does lazy compilation of the given function. Returns true on success and

104
deps/v8/src/heap-inl.h

@ -33,15 +33,26 @@
#include "list-inl.h"
#include "objects.h"
#include "v8-counters.h"
#include "store-buffer.h"
#include "store-buffer-inl.h"
namespace v8 {
namespace internal {
void PromotionQueue::insert(HeapObject* target, int size) {
if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
NewSpacePage* rear_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
ASSERT(!rear_page->prev_page()->is_anchor());
rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit());
}
*(--rear_) = reinterpret_cast<intptr_t>(target);
*(--rear_) = size;
// Assert no overflow into live objects.
ASSERT(reinterpret_cast<Address>(rear_) >= HEAP->new_space()->top());
#ifdef DEBUG
SemiSpace::AssertValidRange(HEAP->new_space()->top(),
reinterpret_cast<Address>(rear_));
#endif
}
@ -84,7 +95,7 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
? lo_space_->AllocateRaw(size)
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@ -117,7 +128,7 @@ MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
? lo_space_->AllocateRaw(size)
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@ -181,7 +192,7 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
} else if (CODE_SPACE == space) {
result = code_space_->AllocateRaw(size_in_bytes);
} else if (LO_SPACE == space) {
result = lo_space_->AllocateRaw(size_in_bytes);
result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
} else if (CELL_SPACE == space) {
result = cell_space_->AllocateRaw(size_in_bytes);
} else {
@ -265,6 +276,11 @@ bool Heap::InNewSpace(Object* object) {
}
bool Heap::InNewSpace(Address addr) {
return new_space_.Contains(addr);
}
bool Heap::InFromSpace(Object* object) {
return new_space_.FromSpaceContains(object);
}
@ -275,29 +291,36 @@ bool Heap::InToSpace(Object* object) {
}
bool Heap::OldGenerationAllocationLimitReached() {
if (!incremental_marking()->IsStopped()) return false;
return OldGenerationSpaceAvailable() < 0;
}
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
// An object should be promoted if:
// - the object has survived a scavenge operation or
// - to space is already 25% full.
return old_address < new_space_.age_mark()
|| (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
NewSpacePage* page = NewSpacePage::FromAddress(old_address);
Address age_mark = new_space_.age_mark();
bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
(!page->ContainsLimit(age_mark) || old_address < age_mark);
return below_mark || (new_space_.Size() + object_size) >=
(new_space_.EffectiveCapacity() >> 2);
}
void Heap::RecordWrite(Address address, int offset) {
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
SLOW_ASSERT(Contains(address + offset));
Page::FromAddress(address)->MarkRegionDirty(address + offset);
if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
}
void Heap::RecordWrites(Address address, int start, int len) {
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
Page* page = Page::FromAddress(address);
page->SetRegionMarks(page->GetRegionMarks() |
page->GetRegionMaskForSpan(address + start, len * kPointerSize));
if (!InNewSpace(address)) {
for (int i = 0; i < len; i++) {
store_buffer_.Mark(address + start + i * kPointerSize);
}
}
}
@ -343,31 +366,6 @@ void Heap::CopyBlock(Address dst, Address src, int byte_size) {
}
void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
Page* page = Page::FromAddress(dst);
uint32_t marks = page->GetRegionMarks();
for (int remaining = byte_size / kPointerSize;
remaining > 0;
remaining--) {
Memory::Object_at(dst) = Memory::Object_at(src);
if (InNewSpace(Memory::Object_at(dst))) {
marks |= page->GetRegionMaskForAddress(dst);
}
dst += kPointerSize;
src += kPointerSize;
}
page->SetRegionMarks(marks);
}
void Heap::MoveBlock(Address dst, Address src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
@ -387,16 +385,6 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
}
void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
ASSERT((dst < src) || (dst >= (src + byte_size)));
CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
}
void Heap::ScavengePointer(HeapObject** p) {
ScavengeObject(p, *p);
}
@ -414,7 +402,9 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
// If the first word is a forwarding address, the object has already been
// copied.
if (first_word.IsForwardingAddress()) {
*p = first_word.ToForwardingAddress();
HeapObject* dest = first_word.ToForwardingAddress();
ASSERT(HEAP->InFromSpace(*p));
*p = dest;
return;
}
@ -459,7 +449,7 @@ int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
amount_of_external_allocated_memory_ -
amount_of_external_allocated_memory_at_last_global_gc_;
if (amount_since_last_global_gc > external_allocation_limit_) {
CollectAllGarbage(false);
CollectAllGarbage(kNoGCFlags);
}
} else {
// Avoid underflow.
@ -476,6 +466,7 @@ void Heap::SetLastScriptId(Object* last_script_id) {
roots_[kLastScriptIdRootIndex] = last_script_id;
}
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
@ -688,15 +679,6 @@ Heap* _inline_get_heap_() {
}
void MarkCompactCollector::SetMark(HeapObject* obj) {
tracer_->increment_marked_count();
#ifdef DEBUG
UpdateLiveObjectCount(obj);
#endif
obj->SetMark();
}
} } // namespace v8::internal
#endif // V8_HEAP_INL_H_
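Heap::ShouldBePromoted above keeps the old heuristic even as the address check changes: promote an object if it has already survived one scavenge (it sits below the age mark) or if new space is at least a quarter full. A standalone sketch with plain integers standing in for addresses and sizes:

#include <cstdio>

static bool ShouldBePromoted(bool survived_one_scavenge,
                             int new_space_size,
                             int object_size,
                             int new_space_capacity) {
  return survived_one_scavenge ||
         (new_space_size + object_size) >= (new_space_capacity >> 2);
}

int main() {
  std::printf("%d\n", ShouldBePromoted(false, 10, 6, 64));  // 16 >= 16 -> 1
  std::printf("%d\n", ShouldBePromoted(false, 4, 4, 64));   // 8 < 16   -> 0
  std::printf("%d\n", ShouldBePromoted(true, 0, 1, 64));    // survivor -> 1
}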

1
deps/v8/src/heap-profiler.cc

@ -114,7 +114,6 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
HEAP->CollectAllGarbage(true);
HeapSnapshotGenerator generator(result, control);
generation_completed = generator.GenerateSnapshot();
break;

1531
deps/v8/src/heap.cc

File diff suppressed because it is too large

458
deps/v8/src/heap.h

@ -32,11 +32,15 @@
#include "allocation.h"
#include "globals.h"
#include "incremental-marking.h"
#include "list.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "spaces.h"
#include "splay-tree-inl.h"
#include "store-buffer.h"
#include "v8-counters.h"
#include "v8globals.h"
namespace v8 {
namespace internal {
@ -48,20 +52,20 @@ inline Heap* _inline_get_heap_();
// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V) \
/* Put the byte array map early. We need it to be in place by the time */ \
/* the deserializer hits the next page, since it wants to put a byte */ \
/* array in the unused space at the end of the page. */ \
#define STRONG_ROOT_LIST(V) \
V(Map, byte_array_map, ByteArrayMap) \
V(Map, free_space_map, FreeSpaceMap) \
V(Map, one_pointer_filler_map, OnePointerFillerMap) \
V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
/* Cluster the most popular ones in a few cache lines here at the top. */ \
V(Object, undefined_value, UndefinedValue) \
V(Object, the_hole_value, TheHoleValue) \
V(Object, null_value, NullValue) \
V(Object, true_value, TrueValue) \
V(Object, false_value, FalseValue) \
V(Object, arguments_marker, ArgumentsMarker) \
V(Smi, store_buffer_top, StoreBufferTop) \
V(Oddball, undefined_value, UndefinedValue) \
V(Oddball, the_hole_value, TheHoleValue) \
V(Oddball, null_value, NullValue) \
V(Oddball, true_value, TrueValue) \
V(Oddball, false_value, FalseValue) \
V(Oddball, arguments_marker, ArgumentsMarker) \
V(Oddball, frame_alignment_marker, FrameAlignmentMarker) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, global_context_map, GlobalContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
@ -122,8 +126,9 @@ inline Heap* _inline_get_heap_();
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, message_object_map, JSMessageObjectMap) \
V(Map, foreign_map, ForeignMap) \
V(Object, nan_value, NanValue) \
V(Object, minus_zero_value, MinusZeroValue) \
V(HeapNumber, nan_value, NanValue) \
V(HeapNumber, infinity_value, InfinityValue) \
V(HeapNumber, minus_zero_value, MinusZeroValue) \
V(Map, neander_map, NeanderMap) \
V(JSObject, message_listeners, MessageListeners) \
V(Foreign, prototype_accessors, PrototypeAccessors) \
@ -226,7 +231,9 @@ inline Heap* _inline_get_heap_();
V(closure_symbol, "(closure)") \
V(use_strict, "use strict") \
V(dot_symbol, ".") \
V(anonymous_function_symbol, "(anonymous function)")
V(anonymous_function_symbol, "(anonymous function)") \
V(infinity_symbol, "Infinity") \
V(minus_infinity_symbol, "-Infinity")
// Forward declarations.
class GCTracer;
@ -238,10 +245,26 @@ class WeakObjectRetainer;
typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
Object** pointer);
typedef bool (*DirtyRegionCallback)(Heap* heap,
Address start,
Address end,
ObjectSlotCallback copy_object_func);
class StoreBufferRebuilder {
public:
explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
: store_buffer_(store_buffer) {
}
void Callback(MemoryChunk* page, StoreBufferEvent event);
private:
StoreBuffer* store_buffer_;
// We record in this variable how full the store buffer was when we started
// iterating over the current page, finding pointers to new space. If the
// store buffer overflows again we can exempt the page from the store buffer
// by rewinding to this point instead of having to search the store buffer.
Object*** start_of_current_page_;
// The current page we are scanning in the store buffer iterator.
MemoryChunk* current_page_;
};
// The all static Heap captures the interface to the global object heap.
@ -259,22 +282,37 @@ class PromotionQueue {
PromotionQueue() : front_(NULL), rear_(NULL) { }
void Initialize(Address start_address) {
// Assumes that a NewSpacePage exactly fits a number of promotion queue
// entries (where each is a pair of intptr_t). This allows us to simplify
// the test for when to switch pages.
ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
== 0);
ASSERT(NewSpacePage::IsAtEnd(start_address));
front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
}
bool is_empty() { return front_ <= rear_; }
bool is_empty() { return front_ == rear_; }
inline void insert(HeapObject* target, int size);
void remove(HeapObject** target, int* size) {
ASSERT(!is_empty());
if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
NewSpacePage* front_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
ASSERT(!front_page->prev_page()->is_anchor());
front_ =
reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit());
}
*target = reinterpret_cast<HeapObject*>(*(--front_));
*size = static_cast<int>(*(--front_));
// Assert no underflow.
ASSERT(front_ >= rear_);
SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
reinterpret_cast<Address>(front_));
}
private:
// The front of the queue is higher in memory than the rear.
// The front of the queue is higher in the memory page chain than the rear.
intptr_t* front_;
intptr_t* rear_;
@ -282,6 +320,11 @@ class PromotionQueue {
};
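The PromotionQueue above stores (object, size) pairs between two pointers that both walk downward from the top of new space: insert decrements rear_, remove decrements front_, and the queue is empty when they meet. A compact standalone sketch of that layout over a plain array, without the page-boundary handling:

#include <cstdio>
#include <stdint.h>

struct PromotionQueueSketch {
  intptr_t buffer[16];
  intptr_t* front;
  intptr_t* rear;

  void Initialize() { front = rear = buffer + 16; }   // both start at the top
  bool is_empty() const { return front == rear; }

  void insert(intptr_t target, intptr_t size) {
    *(--rear) = target;
    *(--rear) = size;
  }
  void remove(intptr_t* target, intptr_t* size) {
    *target = *(--front);
    *size = *(--front);
  }
};

int main() {
  PromotionQueueSketch q;
  q.Initialize();
  q.insert(0x1000, 24);
  q.insert(0x2000, 48);
  intptr_t target, size;
  q.remove(&target, &size);
  std::printf("%#lx %ld\n", (long)target, (long)size);  // 0x1000 24: FIFO order
}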
typedef void (*ScavengingCallback)(Map* map,
HeapObject** slot,
HeapObject* object);
// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
// finalize them.
@ -327,8 +370,8 @@ class Heap {
// Configure heap size before setup. Return false if the heap has been
// setup already.
bool ConfigureHeap(int max_semispace_size,
int max_old_gen_size,
int max_executable_size);
intptr_t max_old_gen_size,
intptr_t max_executable_size);
bool ConfigureHeapDefault();
// Initializes the global object heap. If create_heap_objects is true,
@ -456,6 +499,7 @@ class Heap {
// size, but keeping the original prototype. The receiver must have at least
// the size of the new object. The object is reinitialized and behaves as an
// object that has been freshly allocated.
// Returns failure if an error occurred, otherwise object.
MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object,
InstanceType type,
int size);
@ -484,8 +528,10 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type,
int instance_size);
MUST_USE_RESULT MaybeObject* AllocateMap(
InstanceType instance_type,
int instance_size,
ElementsKind elements_kind = FAST_ELEMENTS);
// Allocates a partial map for bootstrapping.
MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@ -796,9 +842,9 @@ class Heap {
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
ExternalAsciiString::Resource* resource);
const ExternalAsciiString::Resource* resource);
MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource);
const ExternalTwoByteString::Resource* resource);
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
@ -885,13 +931,24 @@ class Heap {
// collect more garbage.
inline bool CollectGarbage(AllocationSpace space);
// Performs a full garbage collection. Force compaction if the
// parameter is true.
void CollectAllGarbage(bool force_compaction);
static const int kNoGCFlags = 0;
static const int kMakeHeapIterableMask = 1;
// Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
// non-zero, then the slower precise sweeper is used, which leaves the heap
// in a state where we can iterate over the heap visiting all objects.
void CollectAllGarbage(int flags);
// Last-resort GC; should try to squeeze out as much memory as possible.
void CollectAllAvailableGarbage();
// Check whether the heap is currently iterable.
bool IsHeapIterable();
// Ensure that we have swept all spaces in such a way that we can iterate
// over all objects. May cause a GC.
void EnsureHeapIsIterable();
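A hedged sketch of how a V8-internal caller might use the new flag (the helper name is made up, and it assumes these members are reachable from the call site): passing kMakeHeapIterableMask trades a slower precise sweep for a heap that can be walked object by object afterwards, while ordinary callers simply pass Heap::kNoGCFlags.

    // Sketch only: force a full GC with the precise sweeper so that a
    // subsequent heap walk can visit every object.
    void PrepareForHeapWalk(Heap* heap) {
      heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);
      ASSERT(heap->IsHeapIterable());  // expected to hold after a precise sweep
    }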
// Notify the heap that a context has been disposed.
int NotifyContextDisposed() { return ++contexts_disposed_; }
@ -899,6 +956,20 @@ class Heap {
// ensure correct callback for weak global handles.
void PerformScavenge();
inline void increment_scan_on_scavenge_pages() {
scan_on_scavenge_pages_++;
if (FLAG_gc_verbose) {
PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
}
}
inline void decrement_scan_on_scavenge_pages() {
scan_on_scavenge_pages_--;
if (FLAG_gc_verbose) {
PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
}
}
PromotionQueue* promotion_queue() { return &promotion_queue_; }
#ifdef DEBUG
@ -925,6 +996,8 @@ class Heap {
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
// TODO(1490): Try removing the unchecked accessors, now that GC marking does
// not corrupt the stack.
#define ROOT_ACCESSOR(type, name, camel_name) \
type* name() { \
return type::cast(roots_[k##camel_name##RootIndex]); \
@ -965,60 +1038,16 @@ class Heap {
// Iterates over all the other roots in the heap.
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
enum ExpectedPageWatermarkState {
WATERMARK_SHOULD_BE_VALID,
WATERMARK_CAN_BE_INVALID
};
// For each dirty region on a page in use from an old space call
// visit_dirty_region callback.
// If either visit_dirty_region or callback can cause an allocation
// in old space and changes in allocation watermark then
// can_preallocate_during_iteration should be set to true.
// All pages will be marked as having invalid watermark upon
// iteration completion.
void IterateDirtyRegions(
PagedSpace* space,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback,
ExpectedPageWatermarkState expected_page_watermark_state);
// Interpret marks as a bitvector of dirty marks for regions of size
// Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
// memory interval from start to top. For each dirty region call a
// visit_dirty_region callback. Return updated bitvector of dirty marks.
uint32_t IterateDirtyRegions(uint32_t marks,
Address start,
Address end,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback);
// Iterate pointers to from semispace of new space found in memory interval
// from start to end.
// Update dirty marks for page containing start address.
void IterateAndMarkPointersToFromSpace(Address start,
Address end,
ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// Return true if pointers to new space were found.
static bool IteratePointersInDirtyRegion(Heap* heap,
Address start,
Address end,
ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// This interval is considered to belong to the map space.
// Return true if pointers to new space were found.
static bool IteratePointersInDirtyMapsRegion(Heap* heap,
Address start,
Address end,
ObjectSlotCallback callback);
// Returns whether the object resides in new space.
inline bool InNewSpace(Object* object);
inline bool InNewSpace(Address addr);
inline bool InNewSpacePage(Address addr);
inline bool InFromSpace(Object* object);
inline bool InToSpace(Object* object);
@ -1057,12 +1086,20 @@ class Heap {
roots_[kEmptyScriptRootIndex] = script;
}
void public_set_store_buffer_top(Address* top) {
roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
}
// Update the next script id.
inline void SetLastScriptId(Object* last_script_id);
// Generated code can embed this address to get access to the roots.
Object** roots_address() { return roots_; }
Address* store_buffer_top_address() {
return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
}
// Get address of global contexts list for serialization support.
Object** global_contexts_list_address() {
return &global_contexts_list_;
@ -1075,6 +1112,10 @@ class Heap {
// Verify the heap is in its normal state before or after a GC.
void Verify();
void OldPointerSpaceCheckStoreBuffer();
void MapSpaceCheckStoreBuffer();
void LargeObjectSpaceCheckStoreBuffer();
// Report heap statistics.
void ReportHeapStatistics(const char* title);
void ReportCodeStatistics(const char* title);
@ -1170,22 +1211,51 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
PretenureFlag pretenure);
inline intptr_t PromotedTotalSize() {
return PromotedSpaceSize() + PromotedExternalMemorySize();
}
// True if we have reached the allocation limit in the old generation that
// should force the next GC (caused normally) to be a full one.
bool OldGenerationPromotionLimitReached() {
return (PromotedSpaceSize() + PromotedExternalMemorySize())
> old_gen_promotion_limit_;
inline bool OldGenerationPromotionLimitReached() {
return PromotedTotalSize() > old_gen_promotion_limit_;
}
intptr_t OldGenerationSpaceAvailable() {
return old_gen_allocation_limit_ -
(PromotedSpaceSize() + PromotedExternalMemorySize());
inline intptr_t OldGenerationSpaceAvailable() {
return old_gen_allocation_limit_ - PromotedTotalSize();
}
// True if we have reached the allocation limit in the old generation that
// should artificially cause a GC right now.
bool OldGenerationAllocationLimitReached() {
return OldGenerationSpaceAvailable() < 0;
static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
static const intptr_t kMinimumAllocationLimit =
8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
// When we sweep lazily we initially guess that there is no garbage on the
// heap and set the limits for the next GC accordingly. As we sweep we find
// out that some of the pages contained garbage and we have to adjust
// downwards the size of the heap. This means the limits that control the
// timing of the next GC also need to be adjusted downwards.
void LowerOldGenLimits(intptr_t adjustment) {
size_of_old_gen_at_last_old_space_gc_ -= adjustment;
old_gen_promotion_limit_ =
OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
old_gen_allocation_limit_ =
OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
}
intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
intptr_t limit =
Max(old_gen_size + old_gen_size / 3, kMinimumPromotionLimit);
limit += new_space_.Capacity();
limit *= old_gen_limit_factor_;
return limit;
}
intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
intptr_t limit =
Max(old_gen_size + old_gen_size / 2, kMinimumAllocationLimit);
limit += new_space_.Capacity();
limit *= old_gen_limit_factor_;
return limit;
}
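To make the two formulas concrete (illustrative numbers only, taking Page::kPageSize as 1 MB and old_gen_limit_factor_ as 1): with size_of_old_gen_at_last_old_space_gc_ at 96 MB and an 8 MB new-space capacity, OldGenPromotionLimit yields max(96 MB + 32 MB, 5 MB) + 8 MB = 136 MB, and OldGenAllocationLimit yields max(96 MB + 48 MB, 8 MB) + 8 MB = 152 MB; a limit factor greater than 1 scales both results up accordingly.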
// Can be called when the embedding application is idle.
@ -1213,6 +1283,8 @@ class Heap {
MUST_USE_RESULT MaybeObject* NumberToString(
Object* number, bool check_number_string_cache = true);
MUST_USE_RESULT MaybeObject* Uint32ToString(
uint32_t value, bool check_number_string_cache = true);
Map* MapForExternalArrayType(ExternalArrayType array_type);
RootListIndex RootIndexForExternalArrayType(
@ -1224,18 +1296,10 @@ class Heap {
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size);
// Optimized version of memmove for blocks with pointer size aligned sizes and
// pointer size aligned addresses.
static inline void MoveBlock(Address dst, Address src, int byte_size);
inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size);
// Check new space expansion criteria and expand semispaces if it was hit.
void CheckNewSpaceExpansionCriteria();
@ -1244,9 +1308,31 @@ class Heap {
survived_since_last_expansion_ += survived;
}
inline bool NextGCIsLikelyToBeFull() {
if (FLAG_gc_global) return true;
intptr_t total_promoted = PromotedTotalSize();
intptr_t adjusted_promotion_limit =
old_gen_promotion_limit_ - new_space_.Capacity();
if (total_promoted >= adjusted_promotion_limit) return true;
intptr_t adjusted_allocation_limit =
old_gen_allocation_limit_ - new_space_.Capacity() / 5;
if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
return false;
}
void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
void UpdateReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
void ProcessWeakReferences(WeakObjectRetainer* retainer);
// Helper function that governs the promotion policy from new space to
@ -1263,6 +1349,9 @@ class Heap {
GCTracer* tracer() { return tracer_; }
// Returns the size of objects residing in non new spaces.
intptr_t PromotedSpaceSize();
double total_regexp_code_generated() { return total_regexp_code_generated_; }
void IncreaseTotalRegexpCodeGenerated(int size) {
total_regexp_code_generated_ += size;
@ -1281,6 +1370,18 @@ class Heap {
return &mark_compact_collector_;
}
StoreBuffer* store_buffer() {
return &store_buffer_;
}
Marking* marking() {
return &marking_;
}
IncrementalMarking* incremental_marking() {
return &incremental_marking_;
}
ExternalStringTable* external_string_table() {
return &external_string_table_;
}
@ -1291,16 +1392,28 @@ class Heap {
}
inline Isolate* isolate();
bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
void CallGlobalGCPrologueCallback() {
inline void CallGlobalGCPrologueCallback() {
if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
}
void CallGlobalGCEpilogueCallback() {
inline void CallGlobalGCEpilogueCallback() {
if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
}
inline bool OldGenerationAllocationLimitReached();
inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
}
void QueueMemoryChunkForFree(MemoryChunk* chunk);
void FreeQueuedChunks();
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
inline void CompletelyClearInstanceofCache();
private:
Heap();
@ -1308,12 +1421,12 @@ class Heap {
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_;
intptr_t code_range_size_;
int reserved_semispace_size_;
int max_semispace_size_;
int initial_semispace_size_;
intptr_t max_old_generation_size_;
intptr_t max_executable_size_;
intptr_t code_range_size_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
@ -1328,6 +1441,8 @@ class Heap {
// For keeping track of context disposals.
int contexts_disposed_;
int scan_on_scavenge_pages_;
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 1024*KB;
#else
@ -1344,13 +1459,9 @@ class Heap {
HeapState gc_state_;
int gc_post_processing_depth_;
// Returns the size of object residing in non new spaces.
intptr_t PromotedSpaceSize();
// Returns the amount of external memory registered since last global gc.
int PromotedExternalMemorySize();
int mc_count_; // how many mark-compact collections happened
int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened
@ -1389,6 +1500,13 @@ class Heap {
// every allocation in large object space.
intptr_t old_gen_allocation_limit_;
// Sometimes the heuristics dictate that those limits are increased. This
// variable records that fact.
int old_gen_limit_factor_;
// Used to adjust the limits that control the timing of the next GC.
intptr_t size_of_old_gen_at_last_old_space_gc_;
// Limit on the amount of externally allocated memory allowed
// between global GCs. If reached a global GC is forced.
intptr_t external_allocation_limit_;
@ -1408,6 +1526,8 @@ class Heap {
Object* global_contexts_list_;
StoreBufferRebuilder store_buffer_rebuilder_;
struct StringTypeTable {
InstanceType type;
int size;
@ -1465,13 +1585,11 @@ class Heap {
// Support for computing object sizes during GC.
HeapObjectCallback gc_safe_size_of_old_object_;
static int GcSafeSizeOfOldObject(HeapObject* object);
static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
// Update the GC state. Called from the mark-compact collector.
void MarkMapPointersAsEncoded(bool encoded) {
gc_safe_size_of_old_object_ = encoded
? &GcSafeSizeOfOldObjectWithEncodedMap
: &GcSafeSizeOfOldObject;
ASSERT(!encoded);
gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
}
// Checks whether a global GC is necessary
@ -1483,11 +1601,10 @@ class Heap {
bool PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer);
static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;
inline void UpdateOldSpaceLimits();
// Allocate an uninitialized object in map space. The behavior is identical
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
@ -1522,8 +1639,6 @@ class Heap {
// Allocate empty fixed double array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
// Performs a minor collection in new generation.
void Scavenge();
@ -1532,16 +1647,15 @@ class Heap {
Object** pointer);
Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
static void ScavengeStoreBufferCallback(Heap* heap,
MemoryChunk* page,
StoreBufferEvent event);
// Performs a major collection in the whole heap.
void MarkCompact(GCTracer* tracer);
// Code to be run before and after mark-compact.
void MarkCompactPrologue(bool is_compacting);
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
inline void CompletelyClearInstanceofCache();
void MarkCompactPrologue();
// Record statistics before and after garbage collection.
void ReportStatisticsBeforeGC();
@ -1551,12 +1665,11 @@ class Heap {
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
// Initializes a function with a shared part and prototype.
// Returns the function.
// Note: this code was factored out of AllocateFunction such that
// other parts of the VM could use it. Specifically, a function that creates
// instances of type JS_FUNCTION_TYPE benefits from the use of this function.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT inline MaybeObject* InitializeFunction(
inline void InitializeFunction(
JSFunction* function,
SharedFunctionInfo* shared,
Object* prototype);
@ -1621,6 +1734,8 @@ class Heap {
return high_survival_rate_period_length_ > 0;
}
void SelectScavengingVisitorsTable();
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
@ -1640,10 +1755,11 @@ class Heap {
MarkCompactCollector mark_compact_collector_;
// This field contains the meaning of the WATERMARK_INVALIDATED flag.
// Instead of clearing this flag from all pages we just flip
// its meaning at the beginning of a scavenge.
intptr_t page_watermark_invalidated_mark_;
StoreBuffer store_buffer_;
Marking marking_;
IncrementalMarking incremental_marking_;
int number_idle_notifications_;
unsigned int last_idle_notification_gc_count_;
@ -1658,7 +1774,9 @@ class Heap {
ExternalStringTable external_string_table_;
bool is_safe_to_read_maps_;
VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
MemoryChunk* chunks_queued_for_free_;
friend class Factory;
friend class GCTracer;
@ -1757,29 +1875,6 @@ class VerifyPointersVisitor: public ObjectVisitor {
}
}
};
// Visitor class to verify interior pointers in spaces that use region marks
// to keep track of intergenerational references.
// As VerifyPointersVisitor but also checks that dirty marks are set
// for regions covering intergenerational references.
class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
ASSERT(HEAP->Contains(object));
ASSERT(object->map()->IsMap());
if (HEAP->InNewSpace(object)) {
ASSERT(HEAP->InToSpace(object));
Address addr = reinterpret_cast<Address>(current);
ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
}
}
}
}
};
#endif
@ -2112,16 +2207,6 @@ class GCTracer BASE_EMBEDDED {
// Sets the full GC count.
void set_full_gc_count(int count) { full_gc_count_ = count; }
// Sets the flag that this is a compacting full GC.
void set_is_compacting() { is_compacting_ = true; }
bool is_compacting() const { return is_compacting_; }
// Increment and decrement the count of marked objects.
void increment_marked_count() { ++marked_count_; }
void decrement_marked_count() { --marked_count_; }
int marked_count() { return marked_count_; }
void increment_promoted_objects_size(int object_size) {
promoted_objects_size_ += object_size;
}
@ -2146,23 +2231,6 @@ class GCTracer BASE_EMBEDDED {
// A count (including this one) of the number of full garbage collections.
int full_gc_count_;
// True if the current GC is a compacting full collection, false
// otherwise.
bool is_compacting_;
// True if the *previous* full GC was a compacting collection (will be
// false if there has not been a previous full GC).
bool previous_has_compacted_;
// On a full GC, a count of the number of marked objects. Incremented
// when an object is marked and decremented when an object's mark bit is
// cleared. Will be zero on a scavenge collection.
int marked_count_;
// The count from the end of the previous full GC. Will be zero if there
// was no previous full GC.
int previous_marked_count_;
// Amounts of time spent in different scopes during GC.
double scopes_[Scope::kNumberOfScopes];
@ -2181,6 +2249,13 @@ class GCTracer BASE_EMBEDDED {
// Size of objects promoted during the current collection.
intptr_t promoted_objects_size_;
// Incremental marking steps counters.
int steps_count_;
double steps_took_;
double longest_step_;
int steps_count_since_last_gc_;
double steps_took_since_last_gc_;
Heap* heap_;
};
@ -2292,6 +2367,46 @@ class WeakObjectRetainer {
};
// Intrusive object marking uses the least significant bit of a heap
// object's map word to mark objects. Normally all map words have the
// least significant bit set because they contain a tagged map pointer.
// If the bit is not set, the object is marked.
// All objects should be unmarked before resuming
// JavaScript execution.
class IntrusiveMarking {
public:
static bool IsMarked(HeapObject* object) {
return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
}
static void ClearMark(HeapObject* object) {
uintptr_t map_word = object->map_word().ToRawValue();
object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
ASSERT(!IsMarked(object));
}
static void SetMark(HeapObject* object) {
uintptr_t map_word = object->map_word().ToRawValue();
object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
ASSERT(IsMarked(object));
}
static Map* MapOfMarkedObject(HeapObject* object) {
uintptr_t map_word = object->map_word().ToRawValue();
return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
}
static int SizeOfMarkedObject(HeapObject* object) {
return object->SizeFromMap(MapOfMarkedObject(object));
}
private:
static const uintptr_t kNotMarkedBit = 0x1;
STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
};
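A standalone illustration (not V8 code) of the tag-bit trick relied on above: because a tagged map pointer always has its low bit set, clearing that bit serves as a reversible mark, and the original map word can still be recovered from a marked object.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kNotMarkedBit = 0x1;
      uintptr_t map_word = 0x12345679;               // tagged pointer: low bit set

      uintptr_t marked = map_word & ~kNotMarkedBit;  // SetMark
      assert((marked & kNotMarkedBit) == 0);         // IsMarked

      uintptr_t recovered = marked | kNotMarkedBit;  // ClearMark / MapOfMarkedObject
      assert(recovered == map_word);                 // the map survives marking
      return 0;
    }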
#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
@ -2350,7 +2465,6 @@ class PathTracer : public ObjectVisitor {
};
#endif // DEBUG || LIVE_OBJECT_LIST
} } // namespace v8::internal
#undef HEAP

66
deps/v8/src/hydrogen-instructions.cc

@ -707,6 +707,14 @@ void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
}
void HIsNilAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(kind() == kStrictEquality ? " === " : " == ");
stream->Add(nil() == kNullValue ? "null" : "undefined");
HControlInstruction::PrintDataTo(stream);
}
void HReturn::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
@ -777,15 +785,22 @@ void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" == ");
stream->Add(type_literal_->GetFlatContent().ToAsciiVector());
HControlInstruction::PrintDataTo(stream);
}
void HTypeof::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
void HChange::PrintDataTo(StringStream* stream) {
HUnaryOperation::PrintDataTo(stream);
stream->Add(" %s to %s", from_.Mnemonic(), to().Mnemonic());
stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());
if (CanTruncateToInt32()) stream->Add(" truncating-int32");
if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
if (CheckFlag(kDeoptimizeOnUndefined)) stream->Add(" deopt-on-undefined");
}
@ -857,6 +872,23 @@ void HCheckFunction::PrintDataTo(StringStream* stream) {
}
const char* HCheckInstanceType::GetCheckName() {
switch (check_) {
case IS_SPEC_OBJECT: return "object";
case IS_JS_ARRAY: return "array";
case IS_STRING: return "string";
case IS_SYMBOL: return "symbol";
}
UNREACHABLE();
return "";
}
void HCheckInstanceType::PrintDataTo(StringStream* stream) {
stream->Add("%s ", GetCheckName());
HUnaryOperation::PrintDataTo(stream);
}
void HCallStub::PrintDataTo(StringStream* stream) {
stream->Add("%s ",
CodeStub::MajorName(major_key_, false));
@ -1311,6 +1343,14 @@ void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
}
void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
left()->PrintNameTo(stream);
stream->Add(" ");
right()->PrintNameTo(stream);
HControlInstruction::PrintDataTo(stream);
}
void HGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", SuccessorAt(0)->block_id());
}
@ -1425,7 +1465,7 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
}
bool HLoadKeyedFastElement::RequiresHoleCheck() const {
bool HLoadKeyedFastElement::RequiresHoleCheck() {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (!use->IsChange()) return true;
@ -1442,11 +1482,6 @@ void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
}
bool HLoadKeyedFastDoubleElement::RequiresHoleCheck() const {
return true;
}
void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
@ -1488,6 +1523,7 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo(
stream->Add("pixel");
break;
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -1582,6 +1618,7 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
case EXTERNAL_PIXEL_ELEMENTS:
stream->Add("pixel");
break;
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@ -1598,7 +1635,18 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
stream->Add("[%p]", *cell());
if (check_hole_value()) stream->Add(" (deleteable/read-only)");
if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
if (details_.IsReadOnly()) stream->Add(" (read-only)");
}
bool HLoadGlobalCell::RequiresHoleCheck() {
if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (!use->IsChange()) return true;
}
return false;
}
@ -1610,6 +1658,8 @@ void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
stream->Add("[%p] = ", *cell());
value()->PrintNameTo(stream);
if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
if (details_.IsReadOnly()) stream->Add(" (read-only)");
}

344
deps/v8/src/hydrogen-instructions.h

File diff suppressed because it is too large

408
deps/v8/src/hydrogen.cc

@ -422,7 +422,7 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
};
void HGraph::Verify() const {
void HGraph::Verify(bool do_full_verify) const {
for (int i = 0; i < blocks_.length(); i++) {
HBasicBlock* block = blocks_.at(i);
@ -473,25 +473,27 @@ void HGraph::Verify() const {
// Check special property of first block to have no predecessors.
ASSERT(blocks_.at(0)->predecessors()->is_empty());
// Check that the graph is fully connected.
ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
ASSERT(analyzer.visited_count() == blocks_.length());
if (do_full_verify) {
// Check that the graph is fully connected.
ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
ASSERT(analyzer.visited_count() == blocks_.length());
// Check that entry block dominator is NULL.
ASSERT(entry_block_->dominator() == NULL);
// Check that entry block dominator is NULL.
ASSERT(entry_block_->dominator() == NULL);
// Check dominators.
for (int i = 0; i < blocks_.length(); ++i) {
HBasicBlock* block = blocks_.at(i);
if (block->dominator() == NULL) {
// Only start block may have no dominator assigned to.
ASSERT(i == 0);
} else {
// Assert that block is unreachable if dominator must not be visited.
ReachabilityAnalyzer dominator_analyzer(entry_block_,
blocks_.length(),
block->dominator());
ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
// Check dominators.
for (int i = 0; i < blocks_.length(); ++i) {
HBasicBlock* block = blocks_.at(i);
if (block->dominator() == NULL) {
// Only start block may have no dominator assigned to.
ASSERT(i == 0);
} else {
// Assert that block is unreachable if dominator must not be visited.
ReachabilityAnalyzer dominator_analyzer(entry_block_,
blocks_.length(),
block->dominator());
ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
}
}
}
}
@ -850,7 +852,7 @@ void HGraph::EliminateUnreachablePhis() {
}
bool HGraph::CheckPhis() {
bool HGraph::CheckArgumentsPhiUses() {
int block_count = blocks_.length();
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
@ -863,13 +865,11 @@ bool HGraph::CheckPhis() {
}
bool HGraph::CollectPhis() {
bool HGraph::CheckConstPhiUses() {
int block_count = blocks_.length();
phi_list_ = new ZoneList<HPhi*>(block_count);
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
HPhi* phi = blocks_[i]->phis()->at(j);
phi_list_->Add(phi);
// Check for the hole value (from an uninitialized const).
for (int k = 0; k < phi->OperandCount(); k++) {
if (phi->OperandAt(k) == GetConstantHole()) return false;
@ -880,6 +880,18 @@ bool HGraph::CollectPhis() {
}
void HGraph::CollectPhis() {
int block_count = blocks_.length();
phi_list_ = new ZoneList<HPhi*>(block_count);
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
HPhi* phi = blocks_[i]->phis()->at(j);
phi_list_->Add(phi);
}
}
}
void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
BitVector in_worklist(GetMaximumValueID());
for (int i = 0; i < worklist->length(); ++i) {
@ -1848,7 +1860,7 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
}
if (new_value == NULL) {
new_value = new(zone()) HChange(value, value->representation(), to,
new_value = new(zone()) HChange(value, to,
is_truncating, deoptimize_on_undefined);
}
@ -2320,17 +2332,24 @@ HGraph* HGraphBuilder::CreateGraph() {
graph()->OrderBlocks();
graph()->AssignDominators();
#ifdef DEBUG
// Do a full verify after building the graph and computing dominators.
graph()->Verify(true);
#endif
graph()->PropagateDeoptimizingMark();
graph()->EliminateRedundantPhis();
if (!graph()->CheckPhis()) {
Bailout("Unsupported phi use of arguments object");
if (!graph()->CheckConstPhiUses()) {
Bailout("Unsupported phi use of const variable");
return NULL;
}
if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
if (!graph()->CollectPhis()) {
Bailout("Unsupported phi use of uninitialized constant");
graph()->EliminateRedundantPhis();
if (!graph()->CheckArgumentsPhiUses()) {
Bailout("Unsupported phi use of arguments");
return NULL;
}
if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
graph()->CollectPhis();
HInferRepresentation rep(graph());
rep.Analyze();
@ -3127,6 +3146,16 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
}
switch (variable->location()) {
case Variable::UNALLOCATED: {
// Handle known global constants like 'undefined' specially to avoid a
// load from a global cell for them.
Handle<Object> constant_value =
isolate()->factory()->GlobalConstantFor(variable->name());
if (!constant_value.is_null()) {
HConstant* instr =
new(zone()) HConstant(constant_value, Representation::Tagged());
return ast_context()->ReturnInstruction(instr, expr->id());
}
LookupResult lookup;
GlobalPropertyAccess type =
LookupGlobalProperty(variable, &lookup, false);
@ -3139,8 +3168,8 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
if (type == kUseCell) {
Handle<GlobalObject> global(info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
HLoadGlobalCell* instr =
new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
return ast_context()->ReturnInstruction(instr, expr->id());
} else {
HValue* context = environment()->LookupContext();
@ -3317,7 +3346,43 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* key = AddInstruction(
new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
Representation::Integer32()));
HInstruction* elements_kind =
AddInstruction(new(zone()) HElementsKind(literal));
HBasicBlock* store_fast = graph()->CreateBasicBlock();
// Two empty blocks to satisfy edge split form.
HBasicBlock* store_fast_edgesplit1 = graph()->CreateBasicBlock();
HBasicBlock* store_fast_edgesplit2 = graph()->CreateBasicBlock();
HBasicBlock* store_generic = graph()->CreateBasicBlock();
HBasicBlock* check_smi_only_elements = graph()->CreateBasicBlock();
HBasicBlock* join = graph()->CreateBasicBlock();
HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(value);
smicheck->SetSuccessorAt(0, store_fast_edgesplit1);
smicheck->SetSuccessorAt(1, check_smi_only_elements);
current_block()->Finish(smicheck);
store_fast_edgesplit1->Finish(new(zone()) HGoto(store_fast));
set_current_block(check_smi_only_elements);
HCompareConstantEqAndBranch* smi_elements_check =
new(zone()) HCompareConstantEqAndBranch(elements_kind,
FAST_SMI_ONLY_ELEMENTS,
Token::EQ_STRICT);
smi_elements_check->SetSuccessorAt(0, store_generic);
smi_elements_check->SetSuccessorAt(1, store_fast_edgesplit2);
current_block()->Finish(smi_elements_check);
store_fast_edgesplit2->Finish(new(zone()) HGoto(store_fast));
set_current_block(store_fast);
AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
store_fast->Goto(join);
set_current_block(store_generic);
AddInstruction(BuildStoreKeyedGeneric(literal, key, value));
store_generic->Goto(join);
join->SetJoinId(expr->id());
set_current_block(join);
AddSimulate(expr->GetIdForElement(i));
}
return ast_context()->ReturnValue(Pop());
@ -3561,10 +3626,10 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
LookupResult lookup;
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
Handle<GlobalObject> global(info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
HInstruction* instr = new(zone()) HStoreGlobalCell(value, cell, check_hole);
HInstruction* instr =
new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
instr->set_position(position);
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(ast_id);
@ -3928,6 +3993,7 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
break;
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@ -3944,6 +4010,30 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
}
HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
HValue* checked_key,
HValue* val,
ElementsKind elements_kind,
bool is_store) {
if (is_store) {
ASSERT(val != NULL);
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
return new(zone()) HStoreKeyedFastDoubleElement(
elements, checked_key, val);
} else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
return new(zone()) HStoreKeyedFastElement(
elements, checked_key, val, elements_kind);
}
}
// It's an element load (!is_store).
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
} else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
return new(zone()) HLoadKeyedFastElement(elements, checked_key);
}
}
HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
@ -3951,17 +4041,20 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
bool is_store) {
ASSERT(expr->IsMonomorphic());
Handle<Map> map = expr->GetMonomorphicReceiverType();
if (!map->has_fast_elements() &&
!map->has_fast_double_elements() &&
AddInstruction(new(zone()) HCheckNonSmi(object));
HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
bool fast_smi_only_elements = map->has_fast_smi_only_elements();
bool fast_elements = map->has_fast_elements();
bool fast_double_elements = map->has_fast_double_elements();
if (!fast_smi_only_elements &&
!fast_elements &&
!fast_double_elements &&
!map->has_external_array_elements()) {
return is_store ? BuildStoreKeyedGeneric(object, key, val)
: BuildLoadKeyedGeneric(object, key);
}
AddInstruction(new(zone()) HCheckNonSmi(object));
HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
bool fast_double_elements = map->has_fast_double_elements();
if (is_store && map->has_fast_elements()) {
if (is_store && (fast_elements || fast_smi_only_elements)) {
AddInstruction(new(zone()) HCheckMap(
elements, isolate()->factory()->fixed_array_map()));
}
@ -3976,28 +4069,15 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
return BuildExternalArrayElementAccess(external_elements, checked_key,
val, map->elements_kind(), is_store);
}
ASSERT(map->has_fast_elements() || fast_double_elements);
ASSERT(fast_smi_only_elements || fast_elements || fast_double_elements);
if (map->instance_type() == JS_ARRAY_TYPE) {
length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
} else {
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
}
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
if (is_store) {
if (fast_double_elements) {
return new(zone()) HStoreKeyedFastDoubleElement(elements,
checked_key,
val);
} else {
return new(zone()) HStoreKeyedFastElement(elements, checked_key, val);
}
} else {
if (fast_double_elements) {
return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
} else {
return new(zone()) HLoadKeyedFastElement(elements, checked_key);
}
}
return BuildFastElementAccess(elements, checked_key, val,
map->elements_kind(), is_store);
}
@ -4039,14 +4119,20 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL;
// FAST_ELEMENTS is assumed to be the first case.
STATIC_ASSERT(FAST_ELEMENTS == 0);
// Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
// FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external
// arrays.
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
for (ElementsKind elements_kind = FAST_ELEMENTS;
for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
elements_kind <= LAST_ELEMENTS_KIND;
elements_kind = ElementsKind(elements_kind + 1)) {
// After having handled FAST_ELEMENTS and DICTIONARY_ELEMENTS, we
// need to add some code that's executed for all external array cases.
// After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS,
// FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code
// that's executed for all external array cases.
STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
LAST_ELEMENTS_KIND);
if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
@ -4068,15 +4154,25 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_true);
HInstruction* access;
if (elements_kind == FAST_ELEMENTS ||
if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
elements_kind == FAST_ELEMENTS ||
elements_kind == FAST_DOUBLE_ELEMENTS) {
bool fast_double_elements =
elements_kind == FAST_DOUBLE_ELEMENTS;
if (is_store && elements_kind == FAST_ELEMENTS) {
if (is_store && elements_kind == FAST_SMI_ONLY_ELEMENTS) {
AddInstruction(new(zone()) HCheckSmi(val));
}
if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
AddInstruction(new(zone()) HCheckMap(
elements, isolate()->factory()->fixed_array_map(),
elements_kind_branch));
}
// TODO(jkummerow): The need for these two blocks could be avoided
// in one of two ways:
// (1) Introduce ElementsKinds for JSArrays that are distinct from
// those for fast objects.
// (2) Put the common instructions into a third "join" block. This
// requires additional AST IDs that we can deopt to from inside
// that join block. They must be added to the Property class (when
// it's a keyed property) and registered in the full codegen.
HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
HHasInstanceTypeAndBranch* typecheck =
@ -4086,29 +4182,15 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
current_block()->Finish(typecheck);
set_current_block(if_jsarray);
HInstruction* length = new(zone()) HJSArrayLength(object, typecheck);
AddInstruction(length);
HInstruction* length;
length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck));
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
if (is_store) {
if (fast_double_elements) {
access = AddInstruction(
new(zone()) HStoreKeyedFastDoubleElement(elements,
checked_key,
val));
} else {
access = AddInstruction(
new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
}
} else {
if (fast_double_elements) {
access = AddInstruction(
new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
} else {
access = AddInstruction(
new(zone()) HLoadKeyedFastElement(elements, checked_key));
}
access = AddInstruction(BuildFastElementAccess(
elements, checked_key, val, elements_kind, is_store));
if (!is_store) {
Push(access);
}
*has_side_effects |= access->HasSideEffects();
if (position != -1) {
access->set_position(position);
@ -4118,25 +4200,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_fastobject);
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
if (is_store) {
if (fast_double_elements) {
access = AddInstruction(
new(zone()) HStoreKeyedFastDoubleElement(elements,
checked_key,
val));
} else {
access = AddInstruction(
new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
}
} else {
if (fast_double_elements) {
access = AddInstruction(
new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
} else {
access = AddInstruction(
new(zone()) HLoadKeyedFastElement(elements, checked_key));
}
}
access = AddInstruction(BuildFastElementAccess(
elements, checked_key, val, elements_kind, is_store));
} else if (elements_kind == DICTIONARY_ELEMENTS) {
if (is_store) {
access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
@ -4474,20 +4539,25 @@ bool HGraphBuilder::TryInline(Call* expr) {
return false;
}
// No context change required.
CompilationInfo* outer_info = info();
#if !defined(V8_TARGET_ARCH_IA32)
// Target must be able to use caller's context.
if (target->context() != outer_info->closure()->context() ||
outer_info->scope()->contains_with() ||
outer_info->scope()->num_heap_slots() > 0) {
TraceInline(target, caller, "target requires context change");
return false;
}
#endif
// Don't inline deeper than kMaxInliningLevels calls.
HEnvironment* env = environment();
int current_level = 1;
while (env->outer() != NULL) {
if (current_level == Compiler::kMaxInliningLevels) {
if (current_level == (FLAG_limit_inlining
? Compiler::kMaxInliningLevels
: 2 * Compiler::kMaxInliningLevels)) {
TraceInline(target, caller, "inline depth limit reached");
return false;
}
@ -4593,7 +4663,8 @@ bool HGraphBuilder::TryInline(Call* expr) {
ASSERT(target_shared->has_deoptimization_support());
TypeFeedbackOracle target_oracle(
Handle<Code>(target_shared->code()),
Handle<Context>(target->context()->global_context()));
Handle<Context>(target->context()->global_context()),
isolate());
FunctionState target_state(this, &target_info, &target_oracle);
HConstant* undefined = graph()->GetConstantUndefined();
@ -4602,6 +4673,17 @@ bool HGraphBuilder::TryInline(Call* expr) {
function,
undefined,
call_kind);
#ifdef V8_TARGET_ARCH_IA32
// IA32 only, overwrite the caller's context in the deoptimization
// environment with the correct one.
//
// TODO(kmillikin): implement the same inlining on other platforms so we
// can remove the unsightly ifdefs in this function.
HConstant* context = new HConstant(Handle<Context>(target->context()),
Representation::Tagged());
AddInstruction(context);
inner_env->BindContext(context);
#endif
HBasicBlock* body_entry = CreateBasicBlock(inner_env);
current_block()->Goto(body_entry);
body_entry->SetJoinId(expr->ReturnId());
@ -4922,8 +5004,8 @@ void HGraphBuilder::VisitCall(Call* expr) {
}
} else {
expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
VariableProxy* proxy = expr->expression()->AsVariableProxy();
// FIXME.
bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
if (global_call) {
@ -4975,6 +5057,46 @@ void HGraphBuilder::VisitCall(Call* expr) {
Drop(argument_count);
}
} else if (expr->IsMonomorphic()) {
// The function is on the stack in the unoptimized code during
// evaluation of the arguments.
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
HValue* context = environment()->LookupContext();
HGlobalObject* global = new(zone()) HGlobalObject(context);
HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
AddInstruction(global);
PushAndAdd(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
if (TryInline(expr)) {
// The function is lingering in the deoptimization environment.
// Handle it by case analysis on the AST context.
if (ast_context()->IsEffect()) {
Drop(1);
} else if (ast_context()->IsValue()) {
HValue* result = Pop();
Drop(1);
Push(result);
} else if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
if (context->if_true()->HasPredecessor()) {
context->if_true()->last_environment()->Drop(1);
}
if (context->if_false()->HasPredecessor()) {
context->if_true()->last_environment()->Drop(1);
}
} else {
UNREACHABLE();
}
return;
} else {
call = PreProcessCall(new(zone()) HInvokeFunction(context,
function,
argument_count));
Drop(1); // The function.
}
} else {
CHECK_ALIVE(VisitArgument(expr->expression()));
HValue* context = environment()->LookupContext();
@ -5668,26 +5790,36 @@ Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
}
void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* compare_expr,
Expression* expr,
void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
Expression* sub_expr,
Handle<String> check) {
CHECK_ALIVE(VisitForTypeOf(expr));
HValue* expr_value = Pop();
HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(expr_value, check);
instr->set_position(compare_expr->position());
return ast_context()->ReturnControl(instr, compare_expr->id());
CHECK_ALIVE(VisitForTypeOf(sub_expr));
HValue* value = Pop();
HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
instr->set_position(expr->position());
return ast_context()->ReturnControl(instr, expr->id());
}
void HGraphBuilder::HandleLiteralCompareUndefined(
CompareOperation* compare_expr, Expression* expr) {
CHECK_ALIVE(VisitForValue(expr));
HValue* lhs = Pop();
HValue* rhs = graph()->GetConstantUndefined();
HCompareObjectEqAndBranch* instr =
new(zone()) HCompareObjectEqAndBranch(lhs, rhs);
instr->set_position(compare_expr->position());
return ast_context()->ReturnControl(instr, compare_expr->id());
bool HGraphBuilder::TryLiteralCompare(CompareOperation* expr) {
Expression *sub_expr;
Handle<String> check;
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
HandleLiteralCompareTypeof(expr, sub_expr, check);
return true;
}
if (expr->IsLiteralCompareUndefined(&sub_expr)) {
HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
return true;
}
if (expr->IsLiteralCompareNull(&sub_expr)) {
HandleLiteralCompareNil(expr, sub_expr, kNullValue);
return true;
}
return false;
}
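For context, the three specialized cases above correspond to JavaScript comparisons against literals, e.g. typeof x == "number", comparisons against undefined (such as x === void 0), and x == null / x === null; anything else falls through to the generic compare handling.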
@ -5709,17 +5841,7 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
}
// Check for special cases that compare against literals.
Expression *sub_expr;
Handle<String> check;
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
HandleLiteralCompareTypeof(expr, sub_expr, check);
return;
}
if (expr->IsLiteralCompareUndefined(&sub_expr)) {
HandleLiteralCompareUndefined(expr, sub_expr);
return;
}
if (TryLiteralCompare(expr)) return;
TypeInfo type_info = oracle()->CompareType(expr);
// Check if this expression was ever executed according to type feedback.
@ -5824,14 +5946,18 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
}
void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
CHECK_ALIVE(VisitForValue(expr->expression()));
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
HIsNullAndBranch* instr =
new(zone()) HIsNullAndBranch(value, expr->is_strict());
EqualityKind kind =
expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality;
HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil);
instr->set_position(expr->position());
return ast_context()->ReturnControl(instr, expr->id());
}
@ -5914,9 +6040,7 @@ void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
new(zone()) HHasInstanceTypeAndBranch(value,
JS_FUNCTION_TYPE,
JS_FUNCTION_PROXY_TYPE);
new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@ -6816,7 +6940,7 @@ void HPhase::End() const {
}
#ifdef DEBUG
if (graph_ != NULL) graph_->Verify();
if (graph_ != NULL) graph_->Verify(false); // No full verify.
if (allocator_ != NULL) allocator_->Verify();
#endif
}

27
deps/v8/src/hydrogen.h

@ -243,11 +243,13 @@ class HGraph: public ZoneObject {
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
bool CheckPhis();
bool CheckArgumentsPhiUses();
// Returns false if there are phi-uses of hole values coming
// from uninitialized consts.
bool CollectPhis();
// Returns false if there are phi-uses of an uninitialized const
// which are not supported by the optimizing compiler.
bool CheckConstPhiUses();
void CollectPhis();
Handle<Code> Compile(CompilationInfo* info);
@ -283,7 +285,7 @@ class HGraph: public ZoneObject {
}
#ifdef DEBUG
void Verify() const;
void Verify(bool do_full_verify) const;
#endif
private:
@ -910,11 +912,13 @@ class HGraphBuilder: public AstVisitor {
HValue* receiver,
SmallMapList* types,
Handle<String> name);
void HandleLiteralCompareTypeof(CompareOperation* compare_expr,
Expression* expr,
bool TryLiteralCompare(CompareOperation* expr);
void HandleLiteralCompareTypeof(CompareOperation* expr,
Expression* sub_expr,
Handle<String> check);
void HandleLiteralCompareUndefined(CompareOperation* compare_expr,
Expression* expr);
void HandleLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil);
HStringCharCodeAt* BuildStringCharCodeAt(HValue* context,
HValue* string,
@ -938,6 +942,11 @@ class HGraphBuilder: public AstVisitor {
HValue* val,
ElementsKind elements_kind,
bool is_store);
HInstruction* BuildFastElementAccess(HValue* elements,
HValue* checked_key,
HValue* val,
ElementsKind elements_kind,
bool is_store);
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,

26
deps/v8/src/ia32/assembler-ia32-inl.h

@ -89,8 +89,13 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
if (host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
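The added block appears to be the incremental-marking write barrier for code targets: when a call or jump target embedded in a Code object is patched, the marker is told about the new pointer so that an already-marked (black) Code object does not end up holding an unrecorded reference. The same pattern repeats below for embedded objects, global property cells and debug call addresses.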
@ -116,6 +121,10 @@ void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
if (host() != NULL && target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), &Memory::Object_at(pc_), HeapObject::cast(target));
}
}
@ -147,6 +156,12 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
if (host() != NULL) {
// TODO(1550) We are passing NULL as a slot because the cell can never be
// on an evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), NULL, cell);
}
}
@ -161,6 +176,11 @@ void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Assembler::set_target_address_at(pc_ + 1, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
@ -194,7 +214,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitPointer(target_object_address());
visitor->VisitEmbeddedPointer(host(), target_object_address());
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
@ -222,7 +242,7 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitPointer(heap, target_object_address());
StaticVisitor::VisitEmbeddedPointer(heap, host(), target_object_address());
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);

87
deps/v8/src/ia32/assembler-ia32.cc

@ -55,6 +55,8 @@ uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
// The Probe method needs executable memory, so it uses Heap::CreateCode.
// Allocation failure is silent and leads to a safe default.
void CpuFeatures::Probe() {
ASSERT(!initialized_);
ASSERT(supported_ == 0);
@ -86,23 +88,23 @@ void CpuFeatures::Probe() {
__ pushfd();
__ push(ecx);
__ push(ebx);
__ mov(ebp, Operand(esp));
__ mov(ebp, esp);
// If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
__ pushfd();
__ pop(eax);
__ mov(edx, Operand(eax));
__ mov(edx, eax);
__ xor_(eax, 0x200000); // Flip bit 21.
__ push(eax);
__ popfd();
__ pushfd();
__ pop(eax);
__ xor_(eax, Operand(edx)); // Different if CPUID is supported.
__ xor_(eax, edx); // Different if CPUID is supported.
__ j(not_zero, &cpuid);
// CPUID not supported. Clear the supported features in edx:eax.
__ xor_(eax, Operand(eax));
__ xor_(edx, Operand(edx));
__ xor_(eax, eax);
__ xor_(edx, edx);
__ jmp(&done);
// Invoke CPUID with 1 in eax to get feature information in
@ -118,13 +120,13 @@ void CpuFeatures::Probe() {
// Move the result from ecx:edx to edx:eax and make sure to mark the
// CPUID feature as supported.
__ mov(eax, Operand(edx));
__ mov(eax, edx);
__ or_(eax, 1 << CPUID);
__ mov(edx, Operand(ecx));
__ mov(edx, ecx);
// Done.
__ bind(&done);
__ mov(esp, Operand(ebp));
__ mov(esp, ebp);
__ pop(ebx);
__ pop(ecx);
__ popfd();
@ -286,6 +288,18 @@ bool Operand::is_reg(Register reg) const {
&& ((buf_[0] & 0x07) == reg.code()); // register codes match.
}
bool Operand::is_reg_only() const {
return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only.
}
Register Operand::reg() const {
ASSERT(is_reg_only());
return Register::from_code(buf_[0] & 0x07);
}
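A standalone sketch (not V8 code) of the encoding these helpers inspect: in an x86 ModRM byte, a value of 0b11 in the top two bits selects register-direct addressing, the low three bits carry the register code, and the middle /reg field is left as zero here because emit_operand fills it in when the instruction is emitted. The register chosen below is only an example.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint8_t kEsiCode = 6;          // x86 register code for esi
      uint8_t modrm = 0xC0 | kEsiCode;     // mod = 11 (register), /reg = 0, r/m = esi

      assert((modrm & 0xF8) == 0xC0);      // what is_reg_only() checks
      assert((modrm & 0x07) == kEsiCode);  // what reg() recovers
      return 0;
    }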
// -----------------------------------------------------------------------------
// Implementation of Assembler.
@ -701,6 +715,13 @@ void Assembler::add(Register dst, const Operand& src) {
}
void Assembler::add(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x01);
emit_operand(src, dst);
}
void Assembler::add(const Operand& dst, const Immediate& x) {
ASSERT(reloc_info_writer.last_pc() != NULL);
EnsureSpace ensure_space(this);
@ -741,25 +762,29 @@ void Assembler::and_(const Operand& dst, Register src) {
void Assembler::cmpb(const Operand& op, int8_t imm8) {
EnsureSpace ensure_space(this);
EMIT(0x80);
emit_operand(edi, op); // edi == 7
if (op.is_reg(eax)) {
EMIT(0x3C);
} else {
EMIT(0x80);
emit_operand(edi, op); // edi == 7
}
EMIT(imm8);
}
void Assembler::cmpb(const Operand& dst, Register src) {
ASSERT(src.is_byte_register());
void Assembler::cmpb(const Operand& op, Register reg) {
ASSERT(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x38);
emit_operand(src, dst);
emit_operand(reg, op);
}
void Assembler::cmpb(Register dst, const Operand& src) {
ASSERT(dst.is_byte_register());
void Assembler::cmpb(Register reg, const Operand& op) {
ASSERT(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x3A);
emit_operand(dst, src);
emit_operand(reg, op);
}
@ -1069,18 +1094,6 @@ void Assembler::shr_cl(Register dst) {
}
void Assembler::subb(const Operand& op, int8_t imm8) {
EnsureSpace ensure_space(this);
if (op.is_reg(eax)) {
EMIT(0x2c);
} else {
EMIT(0x80);
emit_operand(ebp, op); // ebp == 5
}
EMIT(imm8);
}
void Assembler::sub(const Operand& dst, const Immediate& x) {
EnsureSpace ensure_space(this);
emit_arith(5, dst, x);
@ -1094,14 +1107,6 @@ void Assembler::sub(Register dst, const Operand& src) {
}
void Assembler::subb(Register dst, const Operand& src) {
ASSERT(dst.code() < 4);
EnsureSpace ensure_space(this);
EMIT(0x2A);
emit_operand(dst, src);
}
void Assembler::sub(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x29);
@ -1158,6 +1163,10 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
void Assembler::test_b(const Operand& op, uint8_t imm8) {
if (op.is_reg_only() && op.reg().code() >= 4) {
test(op, Immediate(imm8));
return;
}
EnsureSpace ensure_space(this);
EMIT(0xF6);
emit_operand(eax, op);
@ -1178,10 +1187,10 @@ void Assembler::xor_(Register dst, const Operand& src) {
}
void Assembler::xor_(const Operand& src, Register dst) {
void Assembler::xor_(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x31);
emit_operand(dst, src);
emit_operand(src, dst);
}
@ -2471,7 +2480,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
return;
}
}
RelocInfo rinfo(pc_, rmode, data);
RelocInfo rinfo(pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}

88
deps/v8/src/ia32/assembler-ia32.h

@ -75,6 +75,8 @@ struct Register {
static inline Register FromAllocationIndex(int index);
static Register from_code(int code) {
ASSERT(code >= 0);
ASSERT(code < kNumRegisters);
Register r = { code };
return r;
}
@ -300,9 +302,6 @@ enum ScaleFactor {
class Operand BASE_EMBEDDED {
public:
// reg
INLINE(explicit Operand(Register reg));
// XMM reg
INLINE(explicit Operand(XMMRegister xmm_reg));
@ -347,12 +346,16 @@ class Operand BASE_EMBEDDED {
// Returns true if this Operand is a wrapper for the specified register.
bool is_reg(Register reg) const;
// Returns true if this Operand is a wrapper for one register.
bool is_reg_only() const;
// Asserts that this Operand is a wrapper for one register and returns the
// register.
Register reg() const;
private:
byte buf_[6];
// The number of bytes in buf_.
unsigned int len_;
// Only valid if len_ > 4.
RelocInfo::Mode rmode_;
// reg
INLINE(explicit Operand(Register reg));
// Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
@ -362,7 +365,15 @@ class Operand BASE_EMBEDDED {
inline void set_disp8(int8_t disp);
inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
byte buf_[6];
// The number of bytes in buf_.
unsigned int len_;
// Only valid if len_ > 4.
RelocInfo::Mode rmode_;
friend class Assembler;
friend class MacroAssembler;
friend class LCodeGen;
};
@ -671,7 +682,9 @@ class Assembler : public AssemblerBase {
void leave();
// Moves
void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
void mov_b(Register dst, const Operand& src);
void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
void mov_b(const Operand& dst, int8_t imm8);
void mov_b(const Operand& dst, Register src);
@ -687,17 +700,24 @@ class Assembler : public AssemblerBase {
void mov(const Operand& dst, Handle<Object> handle);
void mov(const Operand& dst, Register src);
void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
void movsx_b(Register dst, const Operand& src);
void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
void movsx_w(Register dst, const Operand& src);
void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
void movzx_b(Register dst, const Operand& src);
void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
void movzx_w(Register dst, const Operand& src);
// Conditional moves
void cmov(Condition cc, Register dst, int32_t imm32);
void cmov(Condition cc, Register dst, Handle<Object> handle);
void cmov(Condition cc, Register dst, Register src) {
cmov(cc, dst, Operand(src));
}
void cmov(Condition cc, Register dst, const Operand& src);
// Flag management.
@ -715,24 +735,31 @@ class Assembler : public AssemblerBase {
void adc(Register dst, int32_t imm32);
void adc(Register dst, const Operand& src);
void add(Register dst, Register src) { add(dst, Operand(src)); }
void add(Register dst, const Operand& src);
void add(const Operand& dst, Register src);
void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
void add(const Operand& dst, const Immediate& x);
void and_(Register dst, int32_t imm32);
void and_(Register dst, const Immediate& x);
void and_(Register dst, Register src) { and_(dst, Operand(src)); }
void and_(Register dst, const Operand& src);
void and_(const Operand& src, Register dst);
void and_(const Operand& dst, Register src);
void and_(const Operand& dst, const Immediate& x);
void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
void cmpb(const Operand& op, int8_t imm8);
void cmpb(Register src, const Operand& dst);
void cmpb(const Operand& dst, Register src);
void cmpb(Register reg, const Operand& op);
void cmpb(const Operand& op, Register reg);
void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op);
void cmpw(const Operand& op, Immediate imm16);
void cmp(Register reg, int32_t imm32);
void cmp(Register reg, Handle<Object> handle);
void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
void cmp(Register reg, const Operand& op);
void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
void cmp(const Operand& op, const Immediate& imm);
void cmp(const Operand& op, Handle<Object> handle);
@ -748,6 +775,7 @@ class Assembler : public AssemblerBase {
// Signed multiply instructions.
void imul(Register src); // edx:eax = eax * src.
void imul(Register dst, Register src) { imul(dst, Operand(src)); }
void imul(Register dst, const Operand& src); // dst = dst * src.
void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
@ -764,8 +792,10 @@ class Assembler : public AssemblerBase {
void not_(Register dst);
void or_(Register dst, int32_t imm32);
void or_(Register dst, Register src) { or_(dst, Operand(src)); }
void or_(Register dst, const Operand& src);
void or_(const Operand& dst, Register src);
void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
void or_(const Operand& dst, const Immediate& x);
void rcl(Register dst, uint8_t imm8);
@ -776,35 +806,42 @@ class Assembler : public AssemblerBase {
void sbb(Register dst, const Operand& src);
void shld(Register dst, Register src) { shld(dst, Operand(src)); }
void shld(Register dst, const Operand& src);
void shl(Register dst, uint8_t imm8);
void shl_cl(Register dst);
void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
void shrd(Register dst, const Operand& src);
void shr(Register dst, uint8_t imm8);
void shr_cl(Register dst);
void subb(const Operand& dst, int8_t imm8);
void subb(Register dst, const Operand& src);
void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
void sub(const Operand& dst, const Immediate& x);
void sub(Register dst, Register src) { sub(dst, Operand(src)); }
void sub(Register dst, const Operand& src);
void sub(const Operand& dst, Register src);
void test(Register reg, const Immediate& imm);
void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32);
void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
void xor_(Register dst, const Operand& src);
void xor_(const Operand& src, Register dst);
void xor_(const Operand& dst, Register src);
void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
void xor_(const Operand& dst, const Immediate& x);
// Bit operations.
void bt(const Operand& dst, Register src);
void bts(Register dst, Register src) { bts(Operand(dst), src); }
void bts(const Operand& dst, Register src);
// Miscellaneous
@ -835,6 +872,7 @@ class Assembler : public AssemblerBase {
void call(Label* L);
void call(byte* entry, RelocInfo::Mode rmode);
int CallSize(const Operand& adr);
void call(Register reg) { call(Operand(reg)); }
void call(const Operand& adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code,
@ -845,6 +883,7 @@ class Assembler : public AssemblerBase {
// unconditional jump to L
void jmp(Label* L, Label::Distance distance = Label::kFar);
void jmp(byte* entry, RelocInfo::Mode rmode);
void jmp(Register reg) { jmp(Operand(reg)); }
void jmp(const Operand& adr);
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
@ -929,6 +968,7 @@ class Assembler : public AssemblerBase {
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
void cvtsi2sd(XMMRegister dst, const Operand& src);
void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtsd2ss(XMMRegister dst, XMMRegister src);
@ -969,12 +1009,14 @@ class Assembler : public AssemblerBase {
void movdbl(XMMRegister dst, const Operand& src);
void movdbl(const Operand& dst, XMMRegister src);
void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
void movd(XMMRegister dst, const Operand& src);
void movd(const Operand& src, XMMRegister dst);
void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
void movd(const Operand& dst, XMMRegister src);
void movsd(XMMRegister dst, XMMRegister src);
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& src, XMMRegister dst);
void movss(const Operand& dst, XMMRegister src);
void movss(XMMRegister dst, XMMRegister src);
void pand(XMMRegister dst, XMMRegister src);
@ -987,11 +1029,17 @@ class Assembler : public AssemblerBase {
void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
void pextrd(Register dst, XMMRegister src, int8_t offset) {
pextrd(Operand(dst), src, offset);
}
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
void pinsrd(XMMRegister dst, Register src, int8_t offset) {
pinsrd(dst, Operand(src), offset);
}
void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
// Parallel XMM operations.
void movntdqa(XMMRegister src, const Operand& dst);
void movntdqa(XMMRegister dst, const Operand& src);
void movntdq(const Operand& dst, XMMRegister src);
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
@ -1045,6 +1093,9 @@ class Assembler : public AssemblerBase {
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
protected:
bool emit_debug_code() const { return emit_debug_code_; }
@ -1057,9 +1108,8 @@ class Assembler : public AssemblerBase {
byte* addr_at(int pos) { return buffer_ + pos; }
private:
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
uint32_t long_at(int pos) {
return *reinterpret_cast<uint32_t*>(addr_at(pos));
}

1031
deps/v8/src/ia32/builtins-ia32.cc

File diff suppressed because it is too large

1112
deps/v8/src/ia32/code-stubs-ia32.cc

File diff suppressed because it is too large

291
deps/v8/src/ia32/code-stubs-ia32.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -60,6 +60,25 @@ class TranscendentalCacheStub: public CodeStub {
};
class StoreBufferOverflowStub: public CodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated() { return true; }
static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
private:
SaveFPRegsMode save_doubles_;
Major MajorKey() { return StoreBufferOverflow; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@ -418,6 +437,8 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@ -430,7 +451,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
Major MajorKey() { return StringDictionaryNegativeLookup; }
Major MajorKey() { return StringDictionaryLookup; }
int MinorKey() {
return DictionaryBits::encode(dictionary_.code()) |
@ -451,6 +472,272 @@ class StringDictionaryLookupStub: public CodeStub {
};
class RecordWriteStub: public CodeStub {
public:
RecordWriteStub(Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
}
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
INCREMENTAL_COMPACTION
};
virtual bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
static Mode GetMode(Code* stub) {
byte first_instruction = stub->instruction_start()[0];
byte second_instruction = stub->instruction_start()[2];
if (first_instruction == kTwoByteJumpInstruction) {
return INCREMENTAL;
}
ASSERT(first_instruction == kTwoByteNopInstruction);
if (second_instruction == kFiveByteJumpInstruction) {
return INCREMENTAL_COMPACTION;
}
ASSERT(second_instruction == kFiveByteNopInstruction);
return STORE_BUFFER_ONLY;
}
static void Patch(Code* stub, Mode mode) {
switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
stub->instruction_start()[0] = kTwoByteNopInstruction;
stub->instruction_start()[2] = kFiveByteNopInstruction;
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
stub->instruction_start()[0] = kTwoByteJumpInstruction;
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
stub->instruction_start()[0] = kTwoByteNopInstruction;
stub->instruction_start()[2] = kFiveByteJumpInstruction;
break;
}
ASSERT(GetMode(stub) == mode);
CPU::FlushICache(stub->instruction_start(), 7);
}
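// Editorial note, not part of the patch: with the byte constants above, the
// first seven bytes of the stub encode the mode as follows:
//   STORE_BUFFER_ONLY:       3c ??  3d ?? ?? ?? ??   (2-byte nop, then 5-byte nop)
//   INCREMENTAL:             eb ??  ...              (2-byte jmp into the incremental path)
//   INCREMENTAL_COMPACTION:  3c ??  e9 ?? ?? ?? ??   (2-byte nop, then 5-byte jmp)
// GetMode() reads bytes 0 and 2 to recover the state, and Patch() rewrites
// them before flushing the seven patched bytes from the instruction cache.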
private:
// This is a helper class for freeing up 3 scratch registers, where the third
// is always ecx (needed for shift operations). The input is two registers
// that must be preserved and one scratch register provided by the caller.
class RegisterAllocation {
public:
RegisterAllocation(Register object,
Register address,
Register scratch0)
: object_orig_(object),
address_orig_(address),
scratch0_orig_(scratch0),
object_(object),
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
if (scratch0.is(ecx)) {
scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
}
if (object.is(ecx)) {
object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
}
if (address.is(ecx)) {
address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
}
ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
}
void Save(MacroAssembler* masm) {
ASSERT(!address_orig_.is(object_));
ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
// We don't have to save scratch0_orig_ because it was given to us as
// a scratch register. But if we had to switch to a different reg then
// we should save the new scratch0_.
if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
if (!ecx.is(scratch0_orig_) &&
!ecx.is(object_orig_) &&
!ecx.is(address_orig_)) {
masm->push(ecx);
}
masm->push(scratch1_);
if (!address_.is(address_orig_)) {
masm->push(address_);
masm->mov(address_, address_orig_);
}
if (!object_.is(object_orig_)) {
masm->push(object_);
masm->mov(object_, object_orig_);
}
}
void Restore(MacroAssembler* masm) {
// These will have been preserved the entire time, so we just need to move
// them back. Only in one case is the orig_ reg different from the plain
// one, since only one of them can alias with ecx.
if (!object_.is(object_orig_)) {
masm->mov(object_orig_, object_);
masm->pop(object_);
}
if (!address_.is(address_orig_)) {
masm->mov(address_orig_, address_);
masm->pop(address_);
}
masm->pop(scratch1_);
if (!ecx.is(scratch0_orig_) &&
!ecx.is(object_orig_) &&
!ecx.is(address_orig_)) {
masm->pop(ecx);
}
if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
}
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved. The caller saved
// registers are eax, ecx and edx. The three scratch registers (incl. ecx)
// will be restored by other means so we don't bother pushing them here.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(SSE2);
masm->sub(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
// Save all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
}
}
}
inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(SSE2);
// Restore all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
}
masm->add(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
}
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
}
inline Register object() { return object_; }
inline Register address() { return address_; }
inline Register scratch0() { return scratch0_; }
inline Register scratch1() { return scratch1_; }
private:
Register object_orig_;
Register address_orig_;
Register scratch0_orig_;
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
// Third scratch register is always ecx.
Register GetRegThatIsNotEcxOr(Register r1,
Register r2,
Register r3) {
for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(ecx)) continue;
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
if (candidate.is(r3)) continue;
return candidate;
}
UNREACHABLE();
return no_reg;
}
friend class RecordWriteStub;
};
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
void Generate(MacroAssembler* masm);
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
Major MajorKey() { return RecordWrite; }
int MinorKey() {
return ObjectBits::encode(object_.code()) |
ValueBits::encode(value_.code()) |
AddressBits::encode(address_.code()) |
RememberedSetActionBits::encode(remembered_set_action_) |
SaveFPRegsModeBits::encode(save_fp_regs_mode_);
}
bool MustBeInStubCache() {
// All stubs must be registered in the stub cache;
// otherwise the IncrementalMarker would not be able to find
// and patch them.
return true;
}
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
class ObjectBits: public BitField<int, 0, 3> {};
class ValueBits: public BitField<int, 3, 3> {};
class AddressBits: public BitField<int, 6, 3> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
Register object_;
Register value_;
Register address_;
RememberedSetAction remembered_set_action_;
SaveFPRegsMode save_fp_regs_mode_;
RegisterAllocation regs_;
};
} } // namespace v8::internal
#endif // V8_IA32_CODE_STUBS_IA32_H_

46
deps/v8/src/ia32/codegen-ia32.cc

@ -39,12 +39,16 @@ namespace internal {
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterInternalFrame();
masm->EnterFrame(StackFrame::INTERNAL);
ASSERT(!masm->has_frame());
masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveInternalFrame();
masm->LeaveFrame(StackFrame::INTERNAL);
ASSERT(masm->has_frame());
masm->set_has_frame(false);
}
@ -108,14 +112,14 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ mov(edx, dst);
__ and_(edx, 0xF);
__ neg(edx);
__ add(Operand(edx), Immediate(16));
__ add(dst, Operand(edx));
__ add(src, Operand(edx));
__ sub(Operand(count), edx);
__ add(edx, Immediate(16));
__ add(dst, edx);
__ add(src, edx);
__ sub(count, edx);
// edi is now aligned. Check if esi is also aligned.
Label unaligned_source;
__ test(Operand(src), Immediate(0x0F));
__ test(src, Immediate(0x0F));
__ j(not_zero, &unaligned_source);
{
// Copy loop for aligned source and destination.
@ -130,11 +134,11 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ prefetch(Operand(src, 0x20), 1);
__ movdqa(xmm0, Operand(src, 0x00));
__ movdqa(xmm1, Operand(src, 0x10));
__ add(Operand(src), Immediate(0x20));
__ add(src, Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
__ add(Operand(dst), Immediate(0x20));
__ add(dst, Immediate(0x20));
__ dec(loop_count);
__ j(not_zero, &loop);
@ -142,12 +146,12 @@ OS::MemCopyFunction CreateMemCopyFunction() {
// At most 31 bytes to copy.
Label move_less_16;
__ test(Operand(count), Immediate(0x10));
__ test(count, Immediate(0x10));
__ j(zero, &move_less_16);
__ movdqa(xmm0, Operand(src, 0));
__ add(Operand(src), Immediate(0x10));
__ add(src, Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
__ add(Operand(dst), Immediate(0x10));
__ add(dst, Immediate(0x10));
__ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
@ -176,11 +180,11 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ prefetch(Operand(src, 0x20), 1);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
__ add(Operand(src), Immediate(0x20));
__ add(src, Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
__ add(Operand(dst), Immediate(0x20));
__ add(dst, Immediate(0x20));
__ dec(loop_count);
__ j(not_zero, &loop);
@ -188,12 +192,12 @@ OS::MemCopyFunction CreateMemCopyFunction() {
// At most 31 bytes to copy.
Label move_less_16;
__ test(Operand(count), Immediate(0x10));
__ test(count, Immediate(0x10));
__ j(zero, &move_less_16);
__ movdqu(xmm0, Operand(src, 0));
__ add(Operand(src), Immediate(0x10));
__ add(src, Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
__ add(Operand(dst), Immediate(0x10));
__ add(dst, Immediate(0x10));
__ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
@ -228,10 +232,10 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ mov(edx, dst);
__ and_(edx, 0x03);
__ neg(edx);
__ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3)
__ add(dst, Operand(edx));
__ add(src, Operand(edx));
__ sub(Operand(count), edx);
__ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
__ add(dst, edx);
__ add(src, edx);
__ sub(count, edx);
// edi is now aligned, ecx holds the number of remaining bytes to copy.
__ mov(edx, count);
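Editor's note: the alignment prologues above (the 16-byte SSE path and the 4-byte path) compute how many leading bytes to copy so that the destination becomes aligned, via and/neg/add. A hypothetical worked example of the 4-byte case in plain C++; the variable names mirror the registers but are illustrative only.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t dst = 0x1003;       // destination is 3 bytes past a 4-byte boundary
  uint32_t edx = dst & 0x03;   // and_(edx, 0x03)        -> 3
  edx = 0u - edx;              // neg(edx)               -> 0xFFFFFFFD
  edx += 4;                    // add(edx, Immediate(4)) -> 1, i.e. 4 - (dst & 3)
  assert(edx == 1);                   // copy one byte by hand ...
  assert(((dst + edx) & 0x03) == 0);  // ... and the copy loop then runs aligned
  return 0;
}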

95
deps/v8/src/ia32/debug-ia32.cc

@ -100,63 +100,64 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList non_object_regs,
bool convert_call_to_jmp) {
// Enter an internal frame.
__ EnterInternalFrame();
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
// are stored as a smi causing it to be untouched by GC.
ASSERT((object_regs & ~kJSCallerSaved) == 0);
ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
ASSERT((object_regs & non_object_regs) == 0);
for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i);
Register reg = { r };
if ((object_regs & (1 << r)) != 0) {
__ push(reg);
}
if ((non_object_regs & (1 << r)) != 0) {
if (FLAG_debug_code) {
__ test(reg, Immediate(0xc0000000));
__ Assert(zero, "Unable to encode value as smi");
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
// are stored as a smi causing it to be untouched by GC.
ASSERT((object_regs & ~kJSCallerSaved) == 0);
ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
ASSERT((object_regs & non_object_regs) == 0);
for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i);
Register reg = { r };
if ((object_regs & (1 << r)) != 0) {
__ push(reg);
}
if ((non_object_regs & (1 << r)) != 0) {
if (FLAG_debug_code) {
__ test(reg, Immediate(0xc0000000));
__ Assert(zero, "Unable to encode value as smi");
}
__ SmiTag(reg);
__ push(reg);
}
__ SmiTag(reg);
__ push(reg);
}
}
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ Set(eax, Immediate(0)); // No arguments.
__ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(1);
__ CallStub(&ceb);
// Restore the register values containing object pointers from the expression
// stack.
for (int i = kNumJSCallerSaved; --i >= 0;) {
int r = JSCallerSavedCode(i);
Register reg = { r };
if (FLAG_debug_code) {
__ Set(reg, Immediate(kDebugZapValue));
}
if ((object_regs & (1 << r)) != 0) {
__ pop(reg);
}
if ((non_object_regs & (1 << r)) != 0) {
__ pop(reg);
__ SmiUntag(reg);
__ Set(eax, Immediate(0)); // No arguments.
__ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(1);
__ CallStub(&ceb);
// Restore the register values containing object pointers from the
// expression stack.
for (int i = kNumJSCallerSaved; --i >= 0;) {
int r = JSCallerSavedCode(i);
Register reg = { r };
if (FLAG_debug_code) {
__ Set(reg, Immediate(kDebugZapValue));
}
if ((object_regs & (1 << r)) != 0) {
__ pop(reg);
}
if ((non_object_regs & (1 << r)) != 0) {
__ pop(reg);
__ SmiUntag(reg);
}
}
}
// Get rid of the internal frame.
__ LeaveInternalFrame();
// Get rid of the internal frame.
}
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
__ add(Operand(esp), Immediate(kPointerSize));
__ add(esp, Immediate(kPointerSize));
}
// Now that the break point has been handled, resume normal execution by
@ -298,7 +299,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
// Re-run JSFunction, edi is function, esi is context.
__ jmp(Operand(edx));
__ jmp(edx);
}
const bool Debug::kFrameDropperSupported = true;

100
deps/v8/src/ia32/deoptimizer-ia32.cc

@ -116,7 +116,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
new_reloc->GetDataStartAddress() + padding, 0);
intptr_t comment_string
= reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string);
RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
byte* pos_before = reloc_info_writer.pos();
@ -174,7 +174,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// We use RUNTIME_ENTRY for deoptimization bailouts.
RelocInfo rinfo(curr_address + 1, // 1 after the call opcode.
RelocInfo::RUNTIME_ENTRY,
reinterpret_cast<intptr_t>(deopt_entry));
reinterpret_cast<intptr_t>(deopt_entry),
NULL);
reloc_info_writer.Write(&rinfo);
ASSERT_GE(reloc_info_writer.pos(),
reloc_info->address() + ByteArray::kHeaderSize);
@ -205,6 +206,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
// We might be in the middle of incremental marking with compaction.
// Tell collector to treat this code object in a special way and
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@ -221,7 +227,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
@ -250,6 +257,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
*(call_target_address - 2) = 0x90; // nop
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
RelocInfo rinfo(call_target_address,
RelocInfo::CODE_TARGET,
0,
unoptimized_code);
unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
unoptimized_code, &rinfo, replacement_code);
}
@ -268,6 +282,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
*(call_target_address - 2) = 0x07; // offset
Assembler::set_target_address_at(call_target_address,
check_code->entry());
check_code->GetHeap()->incremental_marking()->
RecordCodeTargetPatch(call_target_address, check_code);
}
@ -415,7 +432,14 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Set up the frame pointer and the context pointer.
output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
// All OSR stack frames are dynamically aligned to an 8-byte boundary.
int frame_pointer = input_->GetRegister(ebp.code());
if ((frame_pointer & 0x4) == 0) {
// Return address at FP + 4 should be aligned, so FP mod 8 should be 4.
frame_pointer -= kPointerSize;
has_alignment_padding_ = 1;
}
output_[0]->SetRegister(ebp.code(), frame_pointer);
output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
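Editor's note: the padding logic above wants the return address slot at ebp + 4 to be 8-byte aligned, which means ebp itself must be 4 mod 8. A small standalone check of that arithmetic; the frame pointer value is made up and kPointerSize is 4 on ia32.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t frame_pointer = 0x7ffc1000u;   // incoming ebp with ebp % 8 == 0
  int has_alignment_padding = 0;
  if ((frame_pointer & 0x4) == 0) {       // same test as the generated code
    frame_pointer -= 4;                   // kPointerSize
    has_alignment_padding = 1;
  }
  assert(frame_pointer % 8 == 4);        // ebp is now 4 mod 8 ...
  assert((frame_pointer + 4) % 8 == 0);  // ... so the return address slot is 8-aligned
  assert(has_alignment_padding == 1);
  return 0;
}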
@ -480,9 +504,11 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
// 2 = context and function in the frame.
top_address =
input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
// If the optimized frame had alignment padding, adjust the frame pointer
// to point to the new position of the old frame pointer after padding
// is removed. Subtract 2 * kPointerSize for the context and function slots.
top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
height_in_bytes + has_alignment_padding_ * kPointerSize;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
@ -533,7 +559,9 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
ASSERT(!is_bottommost ||
input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize
== fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
if (FLAG_trace_deopt) {
@ -638,7 +666,7 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::kNumAllocatableRegisters;
__ sub(Operand(esp), Immediate(kDoubleRegsSize));
__ sub(esp, Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
@ -662,7 +690,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
}
__ sub(edx, Operand(ebp));
__ sub(edx, ebp);
__ neg(edx);
// Allocate a new deoptimizer object.
@ -675,7 +703,10 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
__ mov(Operand(esp, 5 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
}
// Preserve deoptimizer object in register eax and get the input
// frame descriptor pointer.
@ -698,15 +729,15 @@ void Deoptimizer::EntryGenerator::Generate() {
// Remove the bailout id and the double registers from the stack.
if (type() == EAGER) {
__ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
__ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
} else {
__ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
__ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
}
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
__ add(ecx, Operand(esp));
__ add(ecx, esp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
@ -715,18 +746,43 @@ void Deoptimizer::EntryGenerator::Generate() {
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(edx, 0));
__ add(Operand(edx), Immediate(sizeof(uint32_t)));
__ cmp(ecx, Operand(esp));
__ add(edx, Immediate(sizeof(uint32_t)));
__ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
// If frame was dynamically aligned, pop padding.
Label sentinel, sentinel_done;
__ pop(ecx);
__ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
__ j(equal, &sentinel);
__ push(ecx);
__ jmp(&sentinel_done);
__ bind(&sentinel);
__ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
Immediate(1));
__ bind(&sentinel_done);
// Compute the output frame in the deoptimizer.
__ push(eax);
__ PrepareCallCFunction(1, ebx);
__ mov(Operand(esp, 0 * kPointerSize), eax);
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate), 1);
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate), 1);
}
__ pop(eax);
if (type() == OSR) {
// If alignment padding is added, push the sentinel.
Label no_osr_padding;
__ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
Immediate(0));
__ j(equal, &no_osr_padding, Label::kNear);
__ push(Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
__ bind(&no_osr_padding);
}
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the
@ -739,12 +795,12 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(ebx, Operand(eax, 0));
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
__ bind(&inner_push_loop);
__ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
__ sub(ecx, Immediate(sizeof(uint32_t)));
__ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
__ test(ecx, Operand(ecx));
__ test(ecx, ecx);
__ j(not_zero, &inner_push_loop);
__ add(Operand(eax), Immediate(kPointerSize));
__ cmp(eax, Operand(edx));
__ add(eax, Immediate(kPointerSize));
__ cmp(eax, edx);
__ j(below, &outer_push_loop);
// In case of OSR, we have to restore the XMM registers.

29
deps/v8/src/ia32/disasm-ia32.cc

@ -55,6 +55,7 @@ struct ByteMnemonic {
static const ByteMnemonic two_operands_instr[] = {
{0x01, "add", OPER_REG_OP_ORDER},
{0x03, "add", REG_OPER_OP_ORDER},
{0x09, "or", OPER_REG_OP_ORDER},
{0x0B, "or", REG_OPER_OP_ORDER},
@ -117,6 +118,19 @@ static const ByteMnemonic short_immediate_instr[] = {
};
// Generally we don't want to generate these because they are subject to partial
// register stalls. They are included for completeness and because the cmp
// variant is used by the RecordWrite stub. Because it does not update the
// register it is not subject to partial register stalls.
static ByteMnemonic byte_immediate_instr[] = {
{0x0c, "or", UNSET_OP_ORDER},
{0x24, "and", UNSET_OP_ORDER},
{0x34, "xor", UNSET_OP_ORDER},
{0x3c, "cmp", UNSET_OP_ORDER},
{-1, "", UNSET_OP_ORDER}
};
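// Editorial note, not part of the patch: with this table in place, the byte
// sequence 3c 00 now disassembles as "cmp al, 0x0", which is the two-byte
// no-op that RecordWriteStub::Patch() toggles to a short jmp (eb) and back.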
static const char* const jump_conditional_mnem[] = {
/*0*/ "jo", "jno", "jc", "jnc",
/*4*/ "jz", "jnz", "jna", "ja",
@ -149,7 +163,8 @@ enum InstructionType {
REGISTER_INSTR,
MOVE_REG_INSTR,
CALL_JUMP_INSTR,
SHORT_IMMEDIATE_INSTR
SHORT_IMMEDIATE_INSTR,
BYTE_IMMEDIATE_INSTR
};
@ -198,6 +213,7 @@ void InstructionTable::Init() {
CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
CopyTable(call_jump_instr, CALL_JUMP_INSTR);
CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR);
AddJumpConditionalShort();
SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
@ -912,6 +928,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
}
case BYTE_IMMEDIATE_INSTR: {
AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
data += 2;
break;
}
case NO_INSTR:
processed = false;
break;
@ -1346,11 +1368,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 2;
break;
case 0x2C:
AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1));
data += 2;
break;
case 0xA9:
AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
data += 5;

323
deps/v8/src/ia32/full-codegen-ia32.cc

@ -138,7 +138,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// function calls.
if (info->is_strict_mode() || info->is_native()) {
Label ok;
__ test(ecx, Operand(ecx));
__ test(ecx, ecx);
__ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
@ -147,6 +147,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok);
}
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
@ -200,11 +205,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ mov(Operand(esi, context_offset), eax);
// Update the write barrier. This clobbers all involved
// registers, so we have use a third register to avoid
// clobbering esi.
__ mov(ecx, esi);
__ RecordWrite(ecx, context_offset, eax, ebx);
// Update the write barrier. This clobbers eax and ebx.
__ RecordWriteContextSlot(esi,
context_offset,
eax,
ebx,
kDontSaveFPRegs);
}
}
}
@ -365,10 +371,10 @@ void FullCodeGenerator::EmitReturnSequence() {
void FullCodeGenerator::verify_stack_height() {
ASSERT(FLAG_verify_stack_height);
__ sub(Operand(ebp), Immediate(kPointerSize * stack_height()));
__ cmp(ebp, Operand(esp));
__ sub(ebp, Immediate(kPointerSize * stack_height()));
__ cmp(ebp, esp);
__ Assert(equal, "Full codegen stack height not as expected.");
__ add(Operand(ebp), Immediate(kPointerSize * stack_height()));
__ add(ebp, Immediate(kPointerSize * stack_height()));
}
@ -597,7 +603,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
ToBooleanStub stub(result_register());
__ push(result_register());
__ CallStub(&stub, condition->test_id());
__ test(result_register(), Operand(result_register()));
__ test(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
@ -661,11 +667,12 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
__ mov(location, src);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
int offset = Context::SlotOffset(var->index());
ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
__ RecordWrite(scratch0, offset, src, scratch1);
__ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
}
}
@ -738,9 +745,14 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ mov(ContextOperand(esi, variable->index()), result_register());
int offset = Context::SlotOffset(variable->index());
__ mov(ebx, esi);
__ RecordWrite(ebx, offset, result_register(), ecx);
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(esi,
Context::SlotOffset(variable->index()),
result_register(),
ecx,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
} else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration");
@ -835,10 +847,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
if (inline_smi_code) {
Label slow_case;
__ mov(ecx, edx);
__ or_(ecx, Operand(eax));
__ or_(ecx, eax);
patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
__ cmp(edx, Operand(eax));
__ cmp(edx, eax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@ -850,7 +862,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
__ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ test(eax, Operand(eax));
__ test(eax, eax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@ -939,7 +951,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// For all objects but the receiver, check that the cache is empty.
Label check_prototype;
__ cmp(ecx, Operand(eax));
__ cmp(ecx, eax);
__ j(equal, &check_prototype, Label::kNear);
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
__ cmp(edx, isolate()->factory()->empty_fixed_array());
@ -1021,9 +1033,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(ecx); // Enumerable.
__ push(ebx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ test(eax, Operand(eax));
__ test(eax, eax);
__ j(equal, loop_statement.continue_label());
__ mov(ebx, Operand(eax));
__ mov(ebx, eax);
// Update the 'each' property or variable from the possibly filtered
// entry in register ebx.
@ -1047,7 +1059,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
__ add(Operand(esp), Immediate(5 * kPointerSize));
__ add(esp, Immediate(5 * kPointerSize));
decrement_stack_height(ForIn::kElementCount);
// Exit and decrement the loop depth.
@ -1195,10 +1207,16 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
if (local->mode() == Variable::CONST) {
if (local->mode() == Variable::CONST ||
local->mode() == Variable::LET) {
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
__ mov(eax, isolate()->factory()->undefined_value());
if (local->mode() == Variable::CONST) {
__ mov(eax, isolate()->factory()->undefined_value());
} else { // Variable::LET
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kThrowReferenceError, 1);
}
}
__ jmp(done);
}
@ -1480,8 +1498,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ mov(FieldOperand(ebx, offset), result_register());
Label no_map_change;
__ JumpIfSmi(result_register(), &no_map_change);
// Update the write barrier for the array store.
__ RecordWrite(ebx, offset, result_register(), ecx);
__ RecordWriteField(ebx, offset, result_register(), ecx,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
__ CheckFastSmiOnlyElements(edi, &no_map_change, Label::kNear);
__ push(Operand(esp, 0));
__ CallRuntime(Runtime::kNonSmiElementStored, 1);
__ bind(&no_map_change);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@ -1641,7 +1669,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ pop(edx);
decrement_stack_height();
__ mov(ecx, eax);
__ or_(eax, Operand(edx));
__ or_(eax, edx);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
@ -1691,32 +1719,32 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
}
case Token::ADD:
__ add(eax, Operand(ecx));
__ add(eax, ecx);
__ j(overflow, &stub_call);
break;
case Token::SUB:
__ sub(eax, Operand(ecx));
__ sub(eax, ecx);
__ j(overflow, &stub_call);
break;
case Token::MUL: {
__ SmiUntag(eax);
__ imul(eax, Operand(ecx));
__ imul(eax, ecx);
__ j(overflow, &stub_call);
__ test(eax, Operand(eax));
__ test(eax, eax);
__ j(not_zero, &done, Label::kNear);
__ mov(ebx, edx);
__ or_(ebx, Operand(ecx));
__ or_(ebx, ecx);
__ j(negative, &stub_call);
break;
}
case Token::BIT_OR:
__ or_(eax, Operand(ecx));
__ or_(eax, ecx);
break;
case Token::BIT_AND:
__ and_(eax, Operand(ecx));
__ and_(eax, ecx);
break;
case Token::BIT_XOR:
__ xor_(eax, Operand(ecx));
__ xor_(eax, ecx);
break;
default:
UNREACHABLE();
@ -1859,7 +1887,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(location, eax);
if (var->IsContextSlot()) {
__ mov(edx, eax);
__ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
int offset = Context::SlotOffset(var->index());
__ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
}
}
@ -1877,7 +1906,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(location, eax);
if (var->IsContextSlot()) {
__ mov(edx, eax);
__ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
int offset = Context::SlotOffset(var->index());
__ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
}
} else {
ASSERT(var->IsLookupSlot());
@ -2069,8 +2099,29 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
}
// Record source position for debugger.
SetSourcePosition(expr->position());
// Record call targets in unoptimized code, but not in the snapshot.
bool record_call_target = !Serializer::enabled();
if (record_call_target) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
}
CallFunctionStub stub(arg_count, flags);
__ CallStub(&stub);
if (record_call_target) {
// There is a one element cache in the instruction stream.
#ifdef DEBUG
int return_site_offset = masm()->pc_offset();
#endif
Handle<Object> uninitialized =
CallFunctionStub::UninitializedSentinel(isolate());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
__ test(eax, Immediate(cell));
// Patching code in the stub assumes the opcode is 1 byte and there is
// a word for a pointer in the operand.
ASSERT(masm()->pc_offset() - return_site_offset >= 1 + kPointerSize);
}
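// Editorial note, not part of the patch: for eax with an immediate that
// carries relocation info (the embedded cell), the assembler emits
// "test eax, imm32" as opcode 0xA9 followed by the 32-bit cell address,
// i.e. 1 + kPointerSize == 5 bytes, which is what the assertion above checks.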
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@ -2438,9 +2489,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
STATIC_ASSERT(kPointerSize == 4);
__ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
// Calculate location of the first key name.
__ add(Operand(ebx),
Immediate(FixedArray::kHeaderSize +
DescriptorArray::kFirstIndex * kPointerSize));
__ add(ebx,
Immediate(FixedArray::kHeaderSize +
DescriptorArray::kFirstIndex * kPointerSize));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf the result is false.
Label entry, loop;
@ -2449,9 +2500,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ mov(edx, FieldOperand(ebx, 0));
__ cmp(edx, FACTORY->value_of_symbol());
__ j(equal, if_false);
__ add(Operand(ebx), Immediate(kPointerSize));
__ add(ebx, Immediate(kPointerSize));
__ bind(&entry);
__ cmp(ebx, Operand(ecx));
__ cmp(ebx, ecx);
__ j(not_equal, &loop);
// Reload map as register ebx was used as temporary above.
@ -2591,7 +2642,7 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
__ pop(ebx);
decrement_stack_height();
__ cmp(eax, Operand(ebx));
__ cmp(eax, ebx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@ -2647,20 +2698,24 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
// Assume that there are only two callable types, and one of them is at
// either end of the type range for JS object types. Saves extra comparisons.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
// Map is now in eax.
__ j(below, &null);
// As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
// FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
// LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
__ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
__ j(above_equal, &function);
// Check if the constructor in the map is a function.
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
FIRST_SPEC_OBJECT_TYPE + 1);
__ j(equal, &function);
__ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
LAST_SPEC_OBJECT_TYPE - 1);
__ j(equal, &function);
// Assume that there is no larger type.
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
__ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, &non_function_constructor);
@ -2741,8 +2796,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(xmm1, Operand(ebx));
__ movd(xmm0, Operand(eax));
__ movd(xmm1, ebx);
__ movd(xmm0, eax);
__ cvtss2sd(xmm1, xmm1);
__ xorps(xmm0, xmm1);
__ subsd(xmm0, xmm1);
@ -2843,10 +2898,11 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
// Store the value.
__ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
__ mov(edx, eax);
__ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx);
__ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
__ bind(&done);
context()->Plug(eax);
@ -3119,14 +3175,14 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ mov(index_1, Operand(esp, 1 * kPointerSize));
__ mov(index_2, Operand(esp, 0));
__ mov(temp, index_1);
__ or_(temp, Operand(index_2));
__ or_(temp, index_2);
__ JumpIfNotSmi(temp, &slow_case);
// Check that both indices are valid.
__ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
__ cmp(temp, Operand(index_1));
__ cmp(temp, index_1);
__ j(below_equal, &slow_case);
__ cmp(temp, Operand(index_2));
__ cmp(temp, index_2);
__ j(below_equal, &slow_case);
// Bring addresses into index1 and index2.
@ -3139,16 +3195,35 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ mov(Operand(index_2, 0), object);
__ mov(Operand(index_1, 0), temp);
Label new_space;
__ InNewSpace(elements, temp, equal, &new_space);
__ mov(object, elements);
__ RecordWriteHelper(object, index_1, temp);
__ RecordWriteHelper(elements, index_2, temp);
Label no_remembered_set;
__ CheckPageFlag(elements,
temp,
1 << MemoryChunk::SCAN_ON_SCAVENGE,
not_zero,
&no_remembered_set,
Label::kNear);
// Possible optimization: do a check that both values are Smis
// (or them and test against Smi mask.)
// We are swapping two objects in an array and the incremental marker never
// pauses in the middle of scanning a single object. Therefore the
// incremental marker is not disturbed, so we don't need to call the
// RecordWrite stub that notifies the incremental marker.
__ RememberedSetHelper(elements,
index_1,
temp,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ RememberedSetHelper(elements,
index_2,
temp,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ bind(&no_remembered_set);
__ bind(&new_space);
// We are done. Drop elements from the stack, and return undefined.
__ add(Operand(esp), Immediate(3 * kPointerSize));
__ add(esp, Immediate(3 * kPointerSize));
__ mov(eax, isolate()->factory()->undefined_value());
__ jmp(&done);
@ -3221,11 +3296,11 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
__ pop(left);
Label done, fail, ok;
__ cmp(left, Operand(right));
__ cmp(left, right);
__ j(equal, &ok);
// Fail if either is a non-HeapObject.
__ mov(tmp, left);
__ and_(Operand(tmp), right);
__ and_(tmp, right);
__ JumpIfSmi(tmp, &fail);
__ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
__ CmpInstanceType(tmp, JS_REGEXP_TYPE);
@ -3316,7 +3391,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
Operand separator_operand = Operand(esp, 2 * kPointerSize);
Operand result_operand = Operand(esp, 1 * kPointerSize);
Operand array_length_operand = Operand(esp, 0);
__ sub(Operand(esp), Immediate(2 * kPointerSize));
__ sub(esp, Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
__ JumpIfSmi(array, &bailout);
@ -3352,7 +3427,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
// Live loop registers: index, array_length, string,
// scratch, string_length, elements.
if (FLAG_debug_code) {
__ cmp(index, Operand(array_length));
__ cmp(index, array_length);
__ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
}
__ bind(&loop);
@ -3370,8 +3445,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ add(string_length,
FieldOperand(string, SeqAsciiString::kLengthOffset));
__ j(overflow, &bailout);
__ add(Operand(index), Immediate(1));
__ cmp(index, Operand(array_length));
__ add(index, Immediate(1));
__ cmp(index, array_length);
__ j(less, &loop);
// If array_length is 1, return elements[0], a string.
@ -3405,10 +3480,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
// to string_length.
__ mov(scratch, separator_operand);
__ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
__ sub(string_length, Operand(scratch)); // May be negative, temporarily.
__ sub(string_length, scratch); // May be negative, temporarily.
__ imul(scratch, array_length_operand);
__ j(overflow, &bailout);
__ add(string_length, Operand(scratch));
__ add(string_length, scratch);
__ j(overflow, &bailout);
__ shr(string_length, 1);
@ -3449,7 +3524,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(Operand(index), Immediate(1));
__ add(index, Immediate(1));
__ bind(&loop_1_condition);
__ cmp(index, array_length_operand);
__ j(less, &loop_1); // End while (index < length).
@ -3490,7 +3565,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(Operand(index), Immediate(1));
__ add(index, Immediate(1));
__ cmp(index, array_length_operand);
__ j(less, &loop_2); // End while (index < length).
@ -3531,7 +3606,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(Operand(index), Immediate(1));
__ add(index, Immediate(1));
__ cmp(index, array_length_operand);
__ j(less, &loop_3); // End while (index < length).
@ -3543,7 +3618,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ bind(&done);
__ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
__ add(Operand(esp), Immediate(3 * kPointerSize));
__ add(esp, Immediate(3 * kPointerSize));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
decrement_stack_height();
@ -3823,9 +3898,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
__ add(Operand(eax), Immediate(Smi::FromInt(1)));
__ add(eax, Immediate(Smi::FromInt(1)));
} else {
__ sub(Operand(eax), Immediate(Smi::FromInt(1)));
__ sub(eax, Immediate(Smi::FromInt(1)));
}
__ j(overflow, &stub_call, Label::kNear);
// We could eliminate this smi check if we split the code at
@ -3835,9 +3910,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
// Call stub. Undo operation first.
if (expr->op() == Token::INC) {
__ sub(Operand(eax), Immediate(Smi::FromInt(1)));
__ sub(eax, Immediate(Smi::FromInt(1)));
} else {
__ add(Operand(eax), Immediate(Smi::FromInt(1)));
__ add(eax, Immediate(Smi::FromInt(1)));
}
}
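The inline smi case above relies on the overflow flag of the 32-bit add to detect when the incremented value no longer fits in a smi, and undoes the add before falling back to the stub. A rough, portable sketch of the same check (illustrative only; tagged_one stands for the word encoding of Smi::FromInt(1)):

#include <cstdint>

bool TryInlineSmiIncrement(int32_t& tagged_value, int32_t tagged_one) {
  int64_t wide = static_cast<int64_t>(tagged_value) + tagged_one;
  if (wide != static_cast<int32_t>(wide)) {
    return false;  // would overflow the 32-bit register: use the generic stub
  }
  tagged_value = static_cast<int32_t>(wide);
  return true;
}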
@ -3956,10 +4031,14 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Handle<String> check,
Label* if_true,
Label* if_false,
Label* fall_through) {
Handle<String> check) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
{ AccumulatorValueContext context(this);
VisitForTypeofValue(expr);
}
@ -3998,8 +4077,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(not_zero, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx);
Split(above_equal, if_true, if_false, fall_through);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
__ j(equal, if_true);
__ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
Split(equal, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(eax, if_false);
if (!FLAG_harmony_typeof) {
@ -4017,18 +4099,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
}
void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
Label* if_true,
Label* if_false,
Label* fall_through) {
VisitForAccumulatorValue(expr);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(eax, isolate()->factory()->undefined_value());
Split(equal, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
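The new typeof-"function" check above no longer accepts the whole callable range; with exactly two callable spec object types (as the STATIC_ASSERT records), it reduces to two instance-type comparisons. Restated as plain logic (stand-in parameters, not V8's enum):

bool TypeofIsFunction(int instance_type, int js_function_type,
                      int js_function_proxy_type) {
  // typeof x == "function" holds iff x is a JSFunction or a JSFunctionProxy.
  return instance_type == js_function_type ||
         instance_type == js_function_proxy_type;
}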
@ -4036,9 +4107,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
if (TryLiteralCompare(expr)) return;
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@ -4046,16 +4120,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (expr->op()) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
@ -4071,7 +4138,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ CallStub(&stub);
decrement_stack_height(2);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, Operand(eax));
__ test(eax, eax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
break;
@ -4080,11 +4147,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = no_condition;
bool strict = false;
switch (op) {
case Token::EQ_STRICT:
strict = true;
// Fall through
case Token::EQ:
cc = equal;
__ pop(edx);
@ -4120,10 +4184,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
__ mov(ecx, edx);
__ or_(ecx, eax);
patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
__ cmp(edx, Operand(eax));
__ cmp(edx, eax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
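The mov/or_ pair above is the usual single-branch test that both operands are smis: smis carry a zero tag in the low bit(s), so OR-ing the two words keeps the tag clear only when both are smis. A sketch under that assumption (smi_tag_mask plays the role of kSmiTagMask):

#include <cstdint>

bool BothAreSmis(intptr_t a, intptr_t b, intptr_t smi_tag_mask) {
  return ((a | b) & smi_tag_mask) == 0;
}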
@ -4135,7 +4199,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, Operand(eax));
__ test(eax, eax);
Split(cc, if_true, if_false, fall_through);
}
}
@ -4146,7 +4210,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@ -4154,15 +4220,20 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(expr->expression());
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(eax, isolate()->factory()->null_value());
if (expr->is_strict()) {
Handle<Object> nil_value = nil == kNullValue ?
isolate()->factory()->null_value() :
isolate()->factory()->undefined_value();
__ cmp(eax, nil_value);
if (expr->op() == Token::EQ_STRICT) {
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Object> other_nil_value = nil == kNullValue ?
isolate()->factory()->undefined_value() :
isolate()->factory()->null_value();
__ j(equal, if_true);
__ cmp(eax, isolate()->factory()->undefined_value());
__ cmp(eax, other_nil_value);
__ j(equal, if_true);
__ JumpIfSmi(eax, if_false);
// It can be an undetectable object.
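A sketch of the comparison semantics the new EmitLiteralCompareNil encodes (illustrative flags, not V8's object API): strict equality only matches the named nil value, while the non-strict form also accepts the other nil value and undetectable objects.

enum NilValueSketch { kNullValueSketch, kUndefinedValueSketch };

bool CompareNil(bool is_null, bool is_undefined, bool is_undetectable,
                NilValueSketch nil, bool strict) {
  bool matches_named_nil = (nil == kNullValueSketch) ? is_null : is_undefined;
  if (strict) return matches_named_nil;
  // Non-strict: null == undefined, and undetectable objects compare equal to
  // both, which is what the bit-field check below the cut tests for.
  return is_null || is_undefined || is_undetectable;
}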
@ -4229,7 +4300,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx));
__ pop(edx);
__ sub(Operand(edx), Immediate(masm_->CodeObject()));
__ sub(edx, Immediate(masm_->CodeObject()));
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ SmiTag(edx);
@ -4245,8 +4316,8 @@ void FullCodeGenerator::ExitFinallyBlock() {
// Uncook return address.
__ pop(edx);
__ SmiUntag(edx);
__ add(Operand(edx), Immediate(masm_->CodeObject()));
__ jmp(Operand(edx));
__ add(edx, Immediate(masm_->CodeObject()));
__ jmp(edx);
}
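The cook/uncook pair above parks the return address on the stack as a smi-encoded offset from the code object, so a moving GC can relocate the code while control is inside the finally block. With the single-bit smi encoding asserted by the STATIC_ASSERTs (kSmiTagSize + kSmiShiftSize == 1), the transformation is simply:

#include <cstdint>

uintptr_t CookReturnAddress(uintptr_t ret, uintptr_t code_object) {
  return (ret - code_object) << 1;     // delta from the code object, smi-tagged
}

uintptr_t UncookReturnAddress(uintptr_t cooked, uintptr_t code_object) {
  return (cooked >> 1) + code_object;  // smi-untag and re-add the base
}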

160
deps/v8/src/ia32/ic-ia32.cc

@ -212,7 +212,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update write barrier. Make sure not to clobber the value.
__ mov(r1, value);
__ RecordWrite(elements, r0, r1);
__ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
}
@ -326,7 +326,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Fast case: Do the load.
STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
__ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
__ cmp(scratch, Immediate(FACTORY->the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
@ -394,8 +394,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Check if element is in the range of mapped arguments. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
__ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
__ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
__ cmp(key, Operand(scratch2));
__ sub(scratch2, Immediate(Smi::FromInt(2)));
__ cmp(key, scratch2);
__ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole.
@ -432,7 +432,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
__ cmp(key, Operand(scratch));
__ cmp(key, scratch);
__ j(greater_equal, slow_case);
return FieldOperand(backing_store,
key,
@ -534,7 +534,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shr(ecx, KeyedLookupCache::kMapHashShift);
__ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
__ shr(edi, String::kHashShift);
__ xor_(ecx, Operand(edi));
__ xor_(ecx, edi);
__ and_(ecx, KeyedLookupCache::kCapacityMask);
// Load the key (consisting of map and symbol) from the cache and
@ -545,7 +545,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shl(edi, kPointerSizeLog2 + 1);
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
__ add(Operand(edi), Immediate(kPointerSize));
__ add(edi, Immediate(kPointerSize));
__ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
@ -559,12 +559,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi,
Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
__ sub(edi, Operand(ecx));
__ sub(edi, ecx);
__ j(above_equal, &property_array_property);
// Load in-object property.
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ add(ecx, Operand(edi));
__ add(ecx, edi);
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
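For orientation, the probe index the hash code above computes for the keyed lookup cache can be written as follows (a restatement of the shifts and mask in the hunk, not the cache implementation):

// map_hash is the receiver map's hash field, string_hash the key's hash field.
unsigned KeyedLookupCacheIndex(unsigned map_hash, unsigned string_hash,
                               int map_hash_shift, int string_hash_shift,
                               unsigned capacity_mask) {
  return ((map_hash >> map_hash_shift) ^ (string_hash >> string_hash_shift)) &
         capacity_mask;
}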
@ -651,8 +651,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
__ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
__ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
__ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
__ and_(ecx, Immediate(kSlowCaseBitFieldMask));
__ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor));
__ j(not_zero, &slow);
// Everything is fine, call runtime.
@ -710,7 +710,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(mapped_location, eax);
__ lea(ecx, mapped_location);
__ mov(edx, eax);
__ RecordWrite(ebx, ecx, edx);
__ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in ebx.
@ -719,7 +719,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(unmapped_location, eax);
__ lea(edi, unmapped_location);
__ mov(edx, eax);
__ RecordWrite(ebx, edi, edx);
__ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
@ -734,7 +734,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, fast, array, extra;
Label slow, fast_object_with_map_check, fast_object_without_map_check;
Label fast_double_with_map_check, fast_double_without_map_check;
Label check_if_double_array, array, extra;
// Check that the object isn't a smi.
__ JumpIfSmi(edx, &slow);
@ -750,22 +752,18 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ CmpInstanceType(edi, JS_ARRAY_TYPE);
__ j(equal, &array);
// Check that the object is some kind of JSObject.
__ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE);
__ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
__ j(below, &slow);
__ CmpInstanceType(edi, JS_PROXY_TYPE);
__ j(equal, &slow);
__ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE);
__ j(equal, &slow);
// Object case: Check key against length in the elements array.
// eax: value
// edx: JSObject
// ecx: key (a smi)
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode and writable.
__ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(below, &fast);
// edi: receiver map
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
// Check array bounds. Both the key and the length of FixedArray are smis.
__ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
__ j(below, &fast_object_with_map_check);
// Slow case: call runtime.
__ bind(&slow);
@ -778,16 +776,28 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// eax: value
// edx: receiver, a JSArray
// ecx: key, a smi.
// edi: receiver->elements, a FixedArray
// ebx: receiver->elements, a FixedArray
// edi: receiver map
// flags: compare (ecx, edx.length())
// do not leave holes in the array:
__ j(not_equal, &slow);
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Add 1 to receiver->length, and go to fast array write.
__ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
__ cmp(edi, masm->isolate()->factory()->fixed_array_map());
__ j(not_equal, &check_if_double_array);
// Add 1 to receiver->length, and go to common element store code for Objects.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ jmp(&fast_object_without_map_check);
__ bind(&check_if_double_array);
__ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
__ j(not_equal, &slow);
// Add 1 to receiver->length, and go to common element store code for doubles.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ jmp(&fast);
__ jmp(&fast_double_without_map_check);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
@ -796,24 +806,54 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// eax: value
// edx: receiver, a JSArray
// ecx: key, a smi.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
// edi: receiver map
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
// Check the key against the length in the array and fall through to the
// common store code.
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
__ j(above_equal, &extra);
// Fast case: Do the store.
__ bind(&fast);
// Fast case: Do the store, could be either Object or double.
__ bind(&fast_object_with_map_check);
// eax: value
// ecx: key (a smi)
// edx: receiver
// edi: FixedArray receiver->elements
__ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax);
// ebx: FixedArray receiver->elements
// edi: receiver map
__ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
__ cmp(edi, masm->isolate()->factory()->fixed_array_map());
__ j(not_equal, &fast_double_with_map_check);
__ bind(&fast_object_without_map_check);
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(eax, &non_smi_value);
// It's irrelevant whether array is smi-only or not when writing a smi.
__ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
__ ret(0);
__ bind(&non_smi_value);
// Escape to slow case when writing non-smi into smi-only array.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ CheckFastObjectElements(edi, &slow, Label::kNear);
// Fast elements array, store the value to the elements backing store.
__ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
// Update write barrier for the elements array address.
__ mov(edx, Operand(eax));
__ RecordWrite(edi, 0, edx, ecx);
__ mov(edx, eax); // Preserve the value which is returned.
__ RecordWriteArray(
ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ret(0);
__ bind(&fast_double_with_map_check);
// Check for fast double array case. If this fails, call through to the
// runtime.
__ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
__ j(not_equal, &slow);
__ bind(&fast_double_without_map_check);
// If the value is a number, store it as a double in the FastDoubleElements
// array.
__ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0, &slow, false);
__ ret(0);
}
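To summarize the control flow the rewritten KeyedStoreIC::GenerateGeneric sets up, here is a simplified classification of the store paths. The enums are stand-ins rather than V8 types, and element-kind transitions are deliberately left to the slow path, exactly as in the stub:

enum class ElementsKindSketch { kFastSmiOnly, kFastObject, kFastDouble, kOther };
enum class StorePath { kSmiStore, kObjectStoreWithBarrier, kDoubleStore, kSlow };

StorePath ClassifyKeyedStore(ElementsKindSketch kind, bool value_is_smi,
                             bool value_is_number) {
  switch (kind) {
    case ElementsKindSketch::kFastSmiOnly:
      // Writing a smi needs no barrier and no transition; anything else
      // escapes to the runtime.
      return value_is_smi ? StorePath::kSmiStore : StorePath::kSlow;
    case ElementsKindSketch::kFastObject:
      return value_is_smi ? StorePath::kSmiStore
                          : StorePath::kObjectStoreWithBarrier;
    case ElementsKindSketch::kFastDouble:
      return value_is_number ? StorePath::kDoubleStore : StorePath::kSlow;
    default:
      return StorePath::kSlow;
  }
}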
@ -951,22 +991,22 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Enter an internal frame.
__ EnterInternalFrame();
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the receiver and the name of the function.
__ push(edx);
__ push(ecx);
// Push the receiver and the name of the function.
__ push(edx);
__ push(ecx);
// Call the entry.
CEntryStub stub(1);
__ mov(eax, Immediate(2));
__ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
__ CallStub(&stub);
// Call the entry.
CEntryStub stub(1);
__ mov(eax, Immediate(2));
__ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
__ CallStub(&stub);
// Move result to edi and exit the internal frame.
__ mov(edi, eax);
__ LeaveInternalFrame();
// Move result to edi and exit the internal frame.
__ mov(edi, eax);
}
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
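The EnterInternalFrame/LeaveInternalFrame pairs removed here and in the other stubs of this diff are replaced by a FrameScope block, the usual RAII shape: frame setup in the constructor, teardown in the destructor, so every exit from the block emits the teardown. A generic sketch (the EnterFrame/LeaveFrame names are assumptions about the underlying helpers, not necessarily what FrameScope itself calls):

template <typename Assembler, typename FrameKind>
class ScopedFrame {
 public:
  ScopedFrame(Assembler* masm, FrameKind kind) : masm_(masm), kind_(kind) {
    masm_->EnterFrame(kind_);                     // emitted when the scope opens
  }
  ~ScopedFrame() { masm_->LeaveFrame(kind_); }    // emitted when it closes
 private:
  Assembler* masm_;
  FrameKind kind_;
};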
@ -1111,13 +1151,17 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
__ EnterInternalFrame();
__ push(ecx); // save the key
__ push(edx); // pass the receiver
__ push(ecx); // pass the key
__ CallRuntime(Runtime::kKeyedGetProperty, 2);
__ pop(ecx); // restore the key
__ LeaveInternalFrame();
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(ecx); // save the key
__ push(edx); // pass the receiver
__ push(ecx); // pass the key
__ CallRuntime(Runtime::kKeyedGetProperty, 2);
__ pop(ecx); // restore the key
// Leave the internal frame.
}
__ mov(edi, eax);
__ jmp(&do_call);

255
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -70,6 +70,17 @@ bool LCodeGen::GenerateCode() {
ASSERT(is_unused());
status_ = GENERATING;
CpuFeatures::Scope scope(SSE2);
CodeStub::GenerateFPStubs();
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
dynamic_frame_alignment_ = chunk()->num_double_slots() > 2 ||
info()->osr_ast_id() != AstNode::kNoNumber;
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@ -144,6 +155,29 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}
if (dynamic_frame_alignment_) {
Label do_not_pad, align_loop;
STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
// Align esp to a multiple of 2 * kPointerSize.
__ test(esp, Immediate(kPointerSize));
__ j(zero, &do_not_pad, Label::kNear);
__ push(Immediate(0));
__ mov(ebx, esp);
// Copy arguments, receiver, and return address.
__ mov(ecx, Immediate(scope()->num_parameters() + 2));
__ bind(&align_loop);
__ mov(eax, Operand(ebx, 1 * kPointerSize));
__ mov(Operand(ebx, 0), eax);
__ add(Operand(ebx), Immediate(kPointerSize));
__ dec(ecx);
__ j(not_zero, &align_loop, Label::kNear);
__ mov(Operand(ebx, 0),
Immediate(isolate()->factory()->frame_alignment_marker()));
__ bind(&do_not_pad);
}
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
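The new prologue code above pads the stack by one word when esp is not a multiple of 2 * kPointerSize, so that double spill slots end up 8-byte aligned, and marks the padding with the frame-alignment marker so the matching DoReturn change further down knows to pop it. The decision itself is just a bit test (illustrative; kPointerSize is 4 on ia32):

bool NeedsAlignmentPadding(unsigned long esp, unsigned long pointer_size) {
  // The calling convention already guarantees pointer_size alignment, so
  // testing the pointer_size bit is the same as esp % (2 * pointer_size) != 0.
  return (esp & pointer_size) != 0;
}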
@ -204,11 +238,12 @@ bool LCodeGen::GeneratePrologue() {
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ mov(Operand(esi, context_offset), eax);
// Update the write barrier. This clobbers all involved
// registers, so we have to use a third register to avoid
// clobbering esi.
__ mov(ecx, esi);
__ RecordWrite(ecx, context_offset, eax, ebx);
// Update the write barrier. This clobbers eax and ebx.
__ RecordWriteContextSlot(esi,
context_offset,
eax,
ebx,
kDontSaveFPRegs);
}
}
Comment(";;; End allocate local context");
@ -260,6 +295,9 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
__ jmp(code->exit());
}
@ -481,14 +519,18 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context) {
ASSERT(context->IsRegister() || context->IsStackSlot());
if (context->IsRegister()) {
if (!ToRegister(context).is(esi)) {
__ mov(esi, ToRegister(context));
}
} else {
// Context is stack slot.
} else if (context->IsStackSlot()) {
__ mov(esi, ToOperand(context));
} else if (context->IsConstantOperand()) {
Handle<Object> literal =
chunk_->LookupLiteral(LConstantOperand::cast(context));
LoadHeapObject(esi, Handle<Context>::cast(literal));
} else {
UNREACHABLE();
}
__ CallRuntimeSaveDoubles(id);
@ -669,7 +711,7 @@ void LCodeGen::RecordSafepoint(
int arguments,
int deoptimization_index) {
ASSERT(kind == expected_safepoint_kind_);
const ZoneList<LOperand*>* operands = pointers->operands();
const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
@ -1200,8 +1242,13 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
ASSERT(instr->result()->IsRegister());
__ Set(ToRegister(instr->result()), Immediate(instr->value()));
Register reg = ToRegister(instr->result());
Handle<Object> handle = instr->value();
if (handle->IsHeapObject()) {
LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
} else {
__ Set(reg, Immediate(handle));
}
}
@ -1577,23 +1624,33 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
int false_block = chunk_->LookupDestination(instr->false_block_id());
// TODO(fsc): If the expression is known to be a smi, then it's
// definitely not null. Jump to the false block.
// If the expression is known to be untagged or a smi, then it's definitely
// not null, and it can't be an undetectable object.
if (instr->hydrogen()->representation().IsSpecialization() ||
instr->hydrogen()->type().IsSmi()) {
EmitGoto(false_block);
return;
}
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
__ cmp(reg, factory()->null_value());
if (instr->is_strict()) {
Handle<Object> nil_value = instr->nil() == kNullValue ?
factory()->null_value() :
factory()->undefined_value();
__ cmp(reg, nil_value);
if (instr->kind() == kStrictEquality) {
EmitBranch(true_block, false_block, equal);
} else {
Handle<Object> other_nil_value = instr->nil() == kNullValue ?
factory()->undefined_value() :
factory()->null_value();
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ j(equal, true_label);
__ cmp(reg, factory()->undefined_value());
__ cmp(reg, other_nil_value);
__ j(equal, true_label);
__ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
@ -1745,28 +1802,36 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
__ JumpIfSmi(input, is_false);
__ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
__ j(below, is_false);
// Map is now in temp.
// Functions have class 'Function'.
__ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
__ j(above_equal, is_true);
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
FIRST_SPEC_OBJECT_TYPE + 1);
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
LAST_SPEC_OBJECT_TYPE - 1);
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
__ j(below, is_false);
__ j(equal, is_true);
__ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
__ j(equal, is_true);
} else {
__ j(above_equal, is_false);
// Faster code path to avoid two compares: subtract lower bound from the
// actual type and do a signed compare with the width of the type range.
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
__ mov(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
__ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ cmpb(Operand(temp2),
static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(above, is_false);
}
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
// As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
// FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
// LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
if (class_name->IsEqualTo(CStrVector("Object"))) {
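The "faster code path" in this hunk uses the standard subtract-then-unsigned-compare idiom: after subtracting the lower bound, a single unsigned comparison against the range width covers both ends, because values below the bound wrap around to large unsigned numbers. The idiom in isolation (not V8 code):

// first and last are the inclusive range bounds, e.g.
// FIRST_/LAST_NONCALLABLE_SPEC_OBJECT_TYPE in the generated code.
bool InTypeRange(unsigned type, unsigned first, unsigned last) {
  return (type - first) <= (last - first);
}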
@ -1851,9 +1916,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
virtual void Generate() {
codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
}
virtual LInstruction* instr() { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
@ -1991,6 +2055,17 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ mov(esp, ebp);
__ pop(ebp);
if (dynamic_frame_alignment_) {
Label aligned;
// Frame alignment marker (padding) is below arguments,
// and receiver, so its return-address-relative offset is
// (num_arguments + 2) words.
__ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
Immediate(factory()->frame_alignment_marker()));
__ j(not_equal, &aligned);
__ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
__ bind(&aligned);
}
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
@ -1998,7 +2073,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(result, Operand::Cell(instr->hydrogen()->cell()));
if (instr->hydrogen()->check_hole_value()) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
@ -2019,20 +2094,34 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register object = ToRegister(instr->TempAt(0));
Register address = ToRegister(instr->TempAt(1));
Register value = ToRegister(instr->InputAt(0));
Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
ASSERT(!value.is(object));
Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
int offset = JSGlobalPropertyCell::kValueOffset;
__ mov(object, Immediate(cell_handle));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->check_hole_value()) {
__ cmp(cell_operand, factory()->the_hole_value());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(FieldOperand(object, offset), factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
// Store the value.
__ mov(cell_operand, value);
__ mov(FieldOperand(object, offset), value);
// Cells are always in the remembered set.
__ RecordWriteField(object,
offset,
value,
address,
kSaveFPRegs,
OMIT_REMEMBERED_SET);
}
@ -2063,7 +2152,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->needs_write_barrier()) {
Register temp = ToRegister(instr->TempAt(0));
int offset = Context::SlotOffset(instr->slot_index());
__ RecordWrite(context, offset, value, temp);
__ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs);
}
}
@ -2280,16 +2369,14 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
if (instr->hydrogen()->RequiresHoleCheck()) {
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(), instr->key(),
FAST_DOUBLE_ELEMENTS,
offset);
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr->environment());
}
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(), instr->key(),
FAST_DOUBLE_ELEMENTS,
offset);
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr->environment());
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@ -2359,6 +2446,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@ -2680,6 +2768,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
virtual LInstruction* instr() { return instr_; }
private:
LUnaryMathOperation* instr_;
};
@ -3005,7 +3094,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Drop(1);
}
@ -3062,7 +3151,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) {
Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the object for in-object properties.
__ RecordWrite(object, offset, value, temp);
__ RecordWriteField(object, offset, value, temp, kSaveFPRegs);
}
} else {
Register temp = ToRegister(instr->TempAt(0));
@ -3071,7 +3160,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
__ RecordWrite(temp, offset, value, object);
__ RecordWriteField(temp, offset, value, object, kSaveFPRegs);
}
}
}
@ -3130,6 +3219,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@ -3146,6 +3236,13 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
// This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
// conversion, so it deopts in that case.
if (instr->hydrogen()->ValueNeedsSmiCheck()) {
__ test(value, Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
}
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@ -3168,7 +3265,7 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
key,
times_pointer_size,
FixedArray::kHeaderSize));
__ RecordWrite(elements, key, value);
__ RecordWrite(elements, key, value, kSaveFPRegs);
}
}
@ -3212,6 +3309,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@ -3334,6 +3432,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStringCharFromCode* instr_;
};
@ -3413,6 +3512,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
};
@ -3480,6 +3580,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagD* instr_;
};
@ -3581,16 +3682,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
class DeferredTaggedToI: public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
private:
LTaggedToI* instr_;
};
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label done, heap_number;
Register input_reg = ToRegister(instr->InputAt(0));
@ -3672,6 +3763,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
class DeferredTaggedToI: public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LTaggedToI* instr_;
};
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@ -3882,9 +3983,16 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
ASSERT(instr->InputAt(0)->IsRegister());
Operand operand = ToOperand(instr->InputAt(0));
__ cmp(operand, instr->hydrogen()->target());
Handle<JSFunction> target = instr->hydrogen()->target();
if (isolate()->heap()->InNewSpace(*target)) {
Register reg = ToRegister(instr->value());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(target);
__ cmp(reg, Operand::Cell(cell));
} else {
Operand operand = ToOperand(instr->value());
__ cmp(operand, instr->hydrogen()->target());
}
DeoptimizeIf(not_equal, instr->environment());
}
@ -4188,10 +4296,12 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = not_zero;
} else if (type_name->Equals(heap()->function_symbol())) {
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
final_branch_condition = above_equal;
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
__ j(equal, true_label);
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
@ -4303,6 +4413,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};

13
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -58,6 +58,7 @@ class LCodeGen BASE_EMBEDDED {
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
dynamic_frame_alignment_(false),
deferred_(8),
osr_pc_offset_(-1),
deoptimization_reloc_size(),
@ -133,6 +134,10 @@ class LCodeGen BASE_EMBEDDED {
int strict_mode_flag() const {
return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
}
bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; }
void set_dynamic_frame_alignment(bool value) {
dynamic_frame_alignment_ = value;
}
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
@ -297,6 +302,7 @@ class LCodeGen BASE_EMBEDDED {
int inlined_function_count_;
Scope* const scope_;
Status status_;
bool dynamic_frame_alignment_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
@ -346,16 +352,20 @@ class LCodeGen BASE_EMBEDDED {
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen), external_exit_(NULL) {
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@ -366,6 +376,7 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
int instruction_index_;
};
} } // namespace v8::internal

64
deps/v8/src/ia32/lithium-ia32.cc

@ -214,10 +214,11 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
stream->Add(is_strict() ? " === null" : " == null");
stream->Add(kind() == kStrictEquality ? " === " : " == ");
stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@ -351,7 +352,11 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
int LChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot for a double-width slot.
if (is_double) spill_slot_count_++;
if (is_double) {
spill_slot_count_ |= 1; // Make it odd, so incrementing makes it even.
spill_slot_count_++;
num_double_slots_++;
}
return spill_slot_count_++;
}
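A standalone trace of the new slot-allocation arithmetic, just to make the "|= 1, then ++" step concrete (the starting counter value is arbitrary; the index returned for a double is always even):

#include <cstdio>

int NextSpillIndex(int& count, bool is_double, int& num_double_slots) {
  if (is_double) {
    count |= 1;          // make the counter odd...
    count++;             // ...so the index handed out below is even
    num_double_slots++;
  }
  return count++;
}

int main() {
  int count = 4, doubles = 0;
  std::printf("%d\n", NextSpillIndex(count, true, doubles));   // prints 6
  std::printf("%d\n", NextSpillIndex(count, false, doubles));  // prints 7
  return 0;
}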
@ -707,7 +712,9 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
instr->set_environment(CreateEnvironment(hydrogen_env));
int argument_index_accumulator = 0;
instr->set_environment(CreateEnvironment(hydrogen_env,
&argument_index_accumulator));
return instr;
}
@ -994,10 +1001,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@ -1007,7 +1017,6 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
argument_count_,
value_count,
outer);
int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@ -1016,7 +1025,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
op = new LArgument(argument_index++);
op = new LArgument((*argument_index_accumulator)++);
} else {
op = UseAny(value);
}
@ -1471,10 +1480,10 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
// We only need a temp register for non-strict compare.
LOperand* temp = instr->is_strict() ? NULL : TempRegister();
return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
}
@ -1683,7 +1692,13 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseAtStart(instr->value());
// If the target is in new space, we'll emit a global cell compare and so
// want the value in a register. If the target gets promoted before we
// emit code, we will still get the register but will do an immediate
// compare instead of the cell compare. This is safe.
LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target())
? UseRegisterAtStart(instr->value())
: UseAtStart(instr->value());
return AssignEnvironment(new LCheckFunction(value));
}
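Restating the comment above: a target function still in new space may move, so its address is not baked into the code as an immediate; instead the generated code compares through a global property cell that the GC keeps up to date, and only old-space targets get the plain immediate compare. The decision reduces to one predicate (InNewSpace is the heap query used in the hunk; the enum is illustrative):

enum class CheckFunctionStrategy { kCompareThroughCell, kCompareImmediate };

CheckFunctionStrategy ChooseStrategy(bool target_in_new_space) {
  return target_in_new_space ? CheckFunctionStrategy::kCompareThroughCell
                             : CheckFunctionStrategy::kCompareImmediate;
}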
@ -1770,7 +1785,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new LLoadGlobalCell;
return instr->check_hole_value()
return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
@ -1786,8 +1801,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LStoreGlobalCell* result =
new LStoreGlobalCell(UseRegisterAtStart(instr->value()));
return instr->check_hole_value() ? AssignEnvironment(result) : result;
new LStoreGlobalCell(UseTempRegister(instr->value()),
TempRegister(),
TempRegister());
return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@ -1808,15 +1825,13 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LOperand* context;
LOperand* value;
LOperand* temp;
LOperand* context = UseRegister(instr->context());
if (instr->NeedsWriteBarrier()) {
context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value());
temp = TempRegister();
} else {
context = UseRegister(instr->context());
value = UseRegister(instr->value());
temp = NULL;
}
@ -1944,7 +1959,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
ASSERT(instr->object()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* obj = UseTempRegister(instr->object());
LOperand* obj = UseRegister(instr->object());
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
@ -2021,9 +2036,14 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* obj = needs_write_barrier
? UseTempRegister(instr->object())
: UseRegisterAtStart(instr->object());
LOperand* obj;
if (needs_write_barrier) {
obj = instr->is_in_object()
? UseRegister(instr->object())
: UseTempRegister(instr->object());
} else {
obj = UseRegisterAtStart(instr->object());
}
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())

28
deps/v8/src/ia32/lithium-ia32.h

@ -101,7 +101,7 @@ class LCodeGen;
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNullAndBranch) \
V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@ -615,17 +615,18 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
class LIsNullAndBranch: public LControlInstruction<1, 1> {
class LIsNilAndBranch: public LControlInstruction<1, 1> {
public:
LIsNullAndBranch(LOperand* value, LOperand* temp) {
LIsNilAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
bool is_strict() const { return hydrogen()->is_strict(); }
EqualityKind kind() const { return hydrogen()->kind(); }
NilValue nil() const { return hydrogen()->nil(); }
virtual void PrintDataTo(StringStream* stream);
};
@ -1230,10 +1231,12 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
};
class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> {
public:
explicit LStoreGlobalCell(LOperand* value) {
explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp1;
temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
@ -1798,6 +1801,8 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
@ -2070,6 +2075,7 @@ class LChunk: public ZoneObject {
graph_(graph),
instructions_(32),
pointer_maps_(8),
num_double_slots_(0),
inlined_closures_(1) { }
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
@ -2083,6 +2089,8 @@ class LChunk: public ZoneObject {
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
int num_double_slots() const { return num_double_slots_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
@ -2124,6 +2132,7 @@ class LChunk: public ZoneObject {
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
int num_double_slots_;
ZoneList<Handle<JSFunction> > inlined_closures_;
};
@ -2259,7 +2268,8 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);

737
deps/v8/src/ia32/macro-assembler-ia32.cc

File diff suppressed because it is too large

Some files were not shown because too many files changed in this diff
