Downgrade V8 to 3.6.4

v0.7.4-release
Author: Ryan Dahl, 13 years ago
Commit: 0e9c1ca673
Changed files (lines changed in parentheses):

1. deps/v8/ChangeLog (71)
2. deps/v8/Makefile (45)
3. deps/v8/SConstruct (3)
4. deps/v8/benchmarks/spinning-balls/index.html (11)
5. deps/v8/benchmarks/spinning-balls/splay-tree.js (326)
6. deps/v8/benchmarks/spinning-balls/v.js (387)
7. deps/v8/build/common.gypi (12)
8. deps/v8/build/standalone.gypi (7)
9. deps/v8/include/v8-debug.h (5)
10. deps/v8/include/v8.h (42)
11. deps/v8/src/SConscript (2)
12. deps/v8/src/api.cc (121)
13. deps/v8/src/arm/assembler-arm-inl.h (24)
14. deps/v8/src/arm/assembler-arm.cc (12)
15. deps/v8/src/arm/assembler-arm.h (10)
16. deps/v8/src/arm/builtins-arm.cc (150)
17. deps/v8/src/arm/code-stubs-arm.cc (532)
18. deps/v8/src/arm/code-stubs-arm.h (245)
19. deps/v8/src/arm/codegen-arm.cc (8)
20. deps/v8/src/arm/codegen-arm.h (10)
21. deps/v8/src/arm/debug-arm.cc (6)
22. deps/v8/src/arm/deoptimizer-arm.cc (28)
23. deps/v8/src/arm/frames-arm.h (10)
24. deps/v8/src/arm/full-codegen-arm.cc (227)
25. deps/v8/src/arm/ic-arm.cc (123)
26. deps/v8/src/arm/lithium-arm.cc (36)
27. deps/v8/src/arm/lithium-arm.h (16)
28. deps/v8/src/arm/lithium-codegen-arm.cc (176)
29. deps/v8/src/arm/lithium-codegen-arm.h (7)
30. deps/v8/src/arm/macro-assembler-arm.cc (560)
31. deps/v8/src/arm/macro-assembler-arm.h (222)
32. deps/v8/src/arm/regexp-macro-assembler-arm.cc (9)
33. deps/v8/src/arm/simulator-arm.cc (2)
34. deps/v8/src/arm/stub-cache-arm.cc (269)
35. deps/v8/src/array.js (151)
36. deps/v8/src/assembler.cc (56)
37. deps/v8/src/assembler.h (30)
38. deps/v8/src/ast.cc (106)
39. deps/v8/src/ast.h (41)
40. deps/v8/src/bootstrapper.cc (47)
41. deps/v8/src/builtins.cc (141)
42. deps/v8/src/cached-powers.cc (12)
43. deps/v8/src/code-stubs.cc (45)
44. deps/v8/src/code-stubs.h (117)
45. deps/v8/src/codegen.cc (2)
46. deps/v8/src/compiler-intrinsics.h (77)
47. deps/v8/src/compiler.cc (13)
48. deps/v8/src/compiler.h (8)
49. deps/v8/src/contexts.cc (108)
50. deps/v8/src/contexts.h (41)
51. deps/v8/src/conversions-inl.h (2)
52. deps/v8/src/conversions.h (2)
53. deps/v8/src/d8-debug.cc (5)
54. deps/v8/src/d8.cc (32)
55. deps/v8/src/debug.cc (224)
56. deps/v8/src/debug.h (90)
57. deps/v8/src/deoptimizer.cc (70)
58. deps/v8/src/deoptimizer.h (18)
59. deps/v8/src/disassembler.cc (2)
60. deps/v8/src/elements.cc (11)
61. deps/v8/src/execution.cc (153)
62. deps/v8/src/execution.h (13)
63. deps/v8/src/extensions/gc-extension.cc (7)
64. deps/v8/src/factory.cc (96)
65. deps/v8/src/factory.h (32)
66. deps/v8/src/flag-definitions.h (38)
67. deps/v8/src/frames-inl.h (76)
68. deps/v8/src/frames.cc (117)
69. deps/v8/src/frames.h (80)
70. deps/v8/src/full-codegen.cc (41)
71. deps/v8/src/full-codegen.h (23)
72. deps/v8/src/globals.h (4)
73. deps/v8/src/handles.cc (17)
74. deps/v8/src/handles.h (15)
75. deps/v8/src/heap-inl.h (104)
76. deps/v8/src/heap-profiler.cc (1)
77. deps/v8/src/heap.cc (1471)
78. deps/v8/src/heap.h (461)
79. deps/v8/src/hydrogen-instructions.cc (66)
80. deps/v8/src/hydrogen-instructions.h (338)
81. deps/v8/src/hydrogen.cc (403)
82. deps/v8/src/hydrogen.h (29)
83. deps/v8/src/ia32/assembler-ia32-inl.h (26)
84. deps/v8/src/ia32/assembler-ia32.cc (83)
85. deps/v8/src/ia32/assembler-ia32.h (88)
86. deps/v8/src/ia32/builtins-ia32.cc (251)
87. deps/v8/src/ia32/code-stubs-ia32.cc (1074)
88. deps/v8/src/ia32/code-stubs-ia32.h (291)
89. deps/v8/src/ia32/codegen-ia32.cc (46)
90. deps/v8/src/ia32/debug-ia32.cc (13)
91. deps/v8/src/ia32/deoptimizer-ia32.cc (94)
92. deps/v8/src/ia32/disasm-ia32.cc (29)
93. deps/v8/src/ia32/full-codegen-ia32.cc (352)
94. deps/v8/src/ia32/ic-ia32.cc (130)
95. deps/v8/src/ia32/lithium-codegen-ia32.cc (237)
96. deps/v8/src/ia32/lithium-codegen-ia32.h (13)
97. deps/v8/src/ia32/lithium-ia32.cc (64)
98. deps/v8/src/ia32/lithium-ia32.h (28)
99. deps/v8/src/ia32/macro-assembler-ia32.cc (723)
100. deps/v8/src/ia32/macro-assembler-ia32.h (218)

deps/v8/ChangeLog (71)

@ -1,74 +1,3 @@
2011-10-13: Version 3.7.0
Fixed array handling for Object.defineOwnProperty (ES5 conformance).
Fixed issue 1757 (string slices of external strings).
Fixed issue 1759 (ARM).
Added flag --noclever-optimizations to disable some things that
caused trouble in the past.
Added flag --stress-compaction for testing.
Added flag --harmony to activate all experimental Harmony features.
2011-10-10: Version 3.6.6
Added a GC pause visualization tool.
Added presubmit=no and werror=no flags to Makefile.
ES5/Test262 conformance improvements.
Fixed compilation issues with GCC 4.5.x (issue 1743).
Bug fixes and performance improvements on all platforms.
2011-10-05: Version 3.6.5
New incremental garbage collector.
Removed the hard heap size limit (soft heap size limit is still
700/1400Mbytes by default).
Implemented ES5 generic Array.prototype.toString (Issue 1361).
V8 now allows surrogate pair codes in decodeURIComponent (Issue 1415).
Fixed x64 RegExp start-of-string bug (Issues 1746, 1748).
Fixed propertyIsEnumerable for numeric properties (Issue 1692).
Fixed the MinGW and Windows 2000 builds.
Fixed "Prototype chain is not searched if named property handler does
not set a property" (Issue 1636).
Made the RegExp.prototype object be a RegExp object (Issue 1217).
Disallowed future reserved words as labels in strict mode.
Fixed string split to correctly coerce the separator to a string
(Issue 1711).
API: Added an optional source length field to the Extension
constructor.
API: Added Debug::DisableAgent to match existing Debug::EnableAgent
(Issue 1573).
Added "native" target to Makefile for the benefit of Linux distros.
Fixed: debugger stops stepping outside evaluate (Issue 1639).
More work on ES-Harmony proxies. Still hidden behind a flag.
Bug fixes and performance improvements on all platforms.
2011-09-15: Version 3.6.4
Fixed d8's broken readline history.

deps/v8/Makefile (45)

@ -32,7 +32,6 @@ LINK ?= "g++"
OUTDIR ?= out
TESTJOBS ?= -j16
GYPFLAGS ?=
TESTFLAGS ?=
# Special build flags. Use them like this: "make library=shared"
@ -51,10 +50,6 @@ endif
ifeq ($(disassembler), on)
GYPFLAGS += -Dv8_enable_disassembler=1
endif
# objectprint=on
ifeq ($(objectprint), on)
GYPFLAGS += -Dv8_object_print=1
endif
# snapshot=off
ifeq ($(snapshot), off)
GYPFLAGS += -Dv8_use_snapshot='false'
@ -77,21 +72,12 @@ endif
ifdef soname_version
GYPFLAGS += -Dsoname_version=$(soname_version)
endif
# werror=no
ifeq ($(werror), no)
GYPFLAGS += -Dwerror=''
endif
# presubmit=no
ifeq ($(presubmit), no)
TESTFLAGS += --no-presubmit
endif
# ----------------- available targets: --------------------
# - "dependencies": pulls in external dependencies (currently: GYP)
# - any arch listed in ARCHES (see below)
# - any mode listed in MODES
# - every combination <arch>.<mode>, e.g. "ia32.release"
# - "native": current host's architecture, release mode
# - any of the above with .check appended, e.g. "ia32.release.check"
# - default (no target specified): build all ARCHES and MODES
# - "check": build all targets and run all tests
@ -117,7 +103,7 @@ CHECKS = $(addsuffix .check,$(BUILDS))
# File where previously used GYPFLAGS are stored.
ENVFILE = $(OUTDIR)/environment
.PHONY: all check clean dependencies $(ENVFILE).new native \
.PHONY: all check clean dependencies $(ENVFILE).new \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES))
@ -138,31 +124,21 @@ $(BUILDS): $(OUTDIR)/Makefile-$$(basename $$@)
python -c "print raw_input().capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
native: $(OUTDIR)/Makefile-native
@$(MAKE) -C "$(OUTDIR)" -f Makefile-native \
CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
builddir="$(shell pwd)/$(OUTDIR)/$@"
# Test targets.
check: all
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
$(TESTFLAGS)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)
$(addsuffix .check,$(MODES)): $$(basename $$@)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
--mode=$(basename $@) $(TESTFLAGS)
--mode=$(basename $@)
$(addsuffix .check,$(ARCHES)): $$(basename $$@)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch=$(basename $@) $(TESTFLAGS)
--arch=$(basename $@)
$(CHECKS): $$(basename $$@)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) $(TESTFLAGS)
native.check: native
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
--arch-and-mode=$(basename $@)
# Clean targets. You can clean each architecture individually, or everything.
$(addsuffix .clean,$(ARCHES)):
@ -171,12 +147,7 @@ $(addsuffix .clean,$(ARCHES)):
rm -rf $(OUTDIR)/$(basename $@).debug
find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete
native.clean:
rm -f $(OUTDIR)/Makefile-native
rm -rf $(OUTDIR)/native
find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete
clean: $(addsuffix .clean,$(ARCHES)) native.clean
clean: $(addsuffix .clean,$(ARCHES))
# GYP file generation targets.
$(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE)
@ -194,10 +165,6 @@ $(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE)
-Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
-S-arm $(GYPFLAGS)
$(OUTDIR)/Makefile-native: $(GYPFILES) $(ENVFILE)
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS)
# Replaces the old with the new environment file if they're different, which
# will trigger GYP to regenerate Makefiles.
$(ENVFILE): $(ENVFILE).new

deps/v8/SConstruct (3)

@ -288,6 +288,7 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
'-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
@ -381,7 +382,7 @@ MKSNAPSHOT_EXTRA_FLAGS = {
DTOA_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wno-uninitialized'],
'WARNINGFLAGS': ['-Werror', '-Wno-uninitialized'],
'CCFLAGS': GCC_DTOA_EXTRA_CCFLAGS
}
},

deps/v8/benchmarks/spinning-balls/index.html (11)

@ -1,11 +0,0 @@
<html>
<head>
<style>
body { text-align: center; }
</style>
</head>
<body>
<script type="text/javascript" src="splay-tree.js"></script>
<script type="text/javascript" src="v.js"></script>
</body>
</html>

deps/v8/benchmarks/spinning-balls/splay-tree.js (326)

@ -1,326 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/**
* Constructs a Splay tree. A splay tree is a self-balancing binary
* search tree with the additional property that recently accessed
* elements are quick to access again. It performs basic operations
* such as insertion, look-up and removal in O(log(n)) amortized time.
*
* @constructor
*/
function SplayTree() {
};
/**
* Pointer to the root node of the tree.
*
* @type {SplayTree.Node}
* @private
*/
SplayTree.prototype.root_ = null;
/**
* @return {boolean} Whether the tree is empty.
*/
SplayTree.prototype.isEmpty = function() {
return !this.root_;
};
/**
* Inserts a node into the tree with the specified key and value if
* the tree does not already contain a node with the specified key. If
* the value is inserted, it becomes the root of the tree.
*
* @param {number} key Key to insert into the tree.
* @param {*} value Value to insert into the tree.
*/
SplayTree.prototype.insert = function(key, value) {
if (this.isEmpty()) {
this.root_ = new SplayTree.Node(key, value);
return;
}
// Splay on the key to move the last node on the search path for
// the key to the root of the tree.
this.splay_(key);
if (this.root_.key == key) {
return;
}
var node = new SplayTree.Node(key, value);
if (key > this.root_.key) {
node.left = this.root_;
node.right = this.root_.right;
this.root_.right = null;
} else {
node.right = this.root_;
node.left = this.root_.left;
this.root_.left = null;
}
this.root_ = node;
};
/**
* Removes a node with the specified key from the tree if the tree
* contains a node with this key. The removed node is returned. If the
* key is not found, an exception is thrown.
*
* @param {number} key Key to find and remove from the tree.
* @return {SplayTree.Node} The removed node.
*/
SplayTree.prototype.remove = function(key) {
if (this.isEmpty()) {
throw Error('Key not found: ' + key);
}
this.splay_(key);
if (this.root_.key != key) {
throw Error('Key not found: ' + key);
}
var removed = this.root_;
if (!this.root_.left) {
this.root_ = this.root_.right;
} else {
var right = this.root_.right;
this.root_ = this.root_.left;
// Splay to make sure that the new root has an empty right child.
this.splay_(key);
// Insert the original right child as the right child of the new
// root.
this.root_.right = right;
}
return removed;
};
/**
* Returns the node having the specified key or null if the tree doesn't contain
* a node with the specified key.
*
* @param {number} key Key to find in the tree.
* @return {SplayTree.Node} Node having the specified key.
*/
SplayTree.prototype.find = function(key) {
if (this.isEmpty()) {
return null;
}
this.splay_(key);
return this.root_.key == key ? this.root_ : null;
};
/**
* @return {SplayTree.Node} Node having the maximum key value.
*/
SplayTree.prototype.findMax = function(opt_startNode) {
if (this.isEmpty()) {
return null;
}
var current = opt_startNode || this.root_;
while (current.right) {
current = current.right;
}
return current;
};
/**
* @return {SplayTree.Node} Node having the maximum key value that
* is less than the specified key value.
*/
SplayTree.prototype.findGreatestLessThan = function(key) {
if (this.isEmpty()) {
return null;
}
// Splay on the key to move the node with the given key or the last
// node on the search path to the top of the tree.
this.splay_(key);
// Now the result is either the root node or the greatest node in
// the left subtree.
if (this.root_.key < key) {
return this.root_;
} else if (this.root_.left) {
return this.findMax(this.root_.left);
} else {
return null;
}
};
/**
* @return {Array<*>} An array containing all the keys of tree's nodes.
*/
SplayTree.prototype.exportKeys = function() {
var result = [];
if (!this.isEmpty()) {
this.root_.traverse_(function(node) { result.push(node.key); });
}
return result;
};
/**
* Perform the splay operation for the given key. Moves the node with
* the given key to the top of the tree. If no node has the given
* key, the last node on the search path is moved to the top of the
* tree. This is the simplified top-down splaying algorithm from:
* "Self-adjusting Binary Search Trees" by Sleator and Tarjan
*
* @param {number} key Key to splay the tree on.
* @private
*/
SplayTree.prototype.splay_ = function(key) {
if (this.isEmpty()) {
return;
}
// Create a dummy node. The use of the dummy node is a bit
// counter-intuitive: The right child of the dummy node will hold
// the L tree of the algorithm. The left child of the dummy node
// will hold the R tree of the algorithm. Using a dummy node, left
// and right will always be nodes and we avoid special cases.
var dummy, left, right;
dummy = left = right = new SplayTree.Node(null, null);
var current = this.root_;
while (true) {
if (key < current.key) {
if (!current.left) {
break;
}
if (key < current.left.key) {
// Rotate right.
var tmp = current.left;
current.left = tmp.right;
tmp.right = current;
current = tmp;
if (!current.left) {
break;
}
}
// Link right.
right.left = current;
right = current;
current = current.left;
} else if (key > current.key) {
if (!current.right) {
break;
}
if (key > current.right.key) {
// Rotate left.
var tmp = current.right;
current.right = tmp.left;
tmp.left = current;
current = tmp;
if (!current.right) {
break;
}
}
// Link left.
left.right = current;
left = current;
current = current.right;
} else {
break;
}
}
// Assemble.
left.right = current.left;
right.left = current.right;
current.left = dummy.right;
current.right = dummy.left;
this.root_ = current;
};
/**
* Constructs a Splay tree node.
*
* @param {number} key Key.
* @param {*} value Value.
*/
SplayTree.Node = function(key, value) {
this.key = key;
this.value = value;
};
/**
* @type {SplayTree.Node}
*/
SplayTree.Node.prototype.left = null;
/**
* @type {SplayTree.Node}
*/
SplayTree.Node.prototype.right = null;
/**
* Performs an ordered traversal of the subtree starting at
* this SplayTree.Node.
*
* @param {function(SplayTree.Node)} f Visitor function.
* @private
*/
SplayTree.Node.prototype.traverse_ = function(f) {
var current = this;
while (current) {
var left = current.left;
if (left) left.traverse_(f);
f(current);
current = current.right;
}
};
SplayTree.prototype.traverseBreadthFirst = function (f) {
if (f(this.root_.value)) return;
var stack = [this.root_];
var length = 1;
while (length > 0) {
var new_stack = new Array(stack.length * 2);
var new_length = 0;
for (var i = 0; i < length; i++) {
var n = stack[i];
var l = n.left;
var r = n.right;
if (l) {
if (f(l.value)) return;
new_stack[new_length++] = l;
}
if (r) {
if (f(r.value)) return;
new_stack[new_length++] = r;
}
}
stack = new_stack;
length = new_length;
}
};

deps/v8/benchmarks/spinning-balls/v.js (387)

@ -1,387 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/**
* This function provides requestAnimationFrame in a cross browser way.
* http://paulirish.com/2011/requestanimationframe-for-smart-animating/
*/
if ( !window.requestAnimationFrame ) {
window.requestAnimationFrame = ( function() {
return window.webkitRequestAnimationFrame ||
window.mozRequestAnimationFrame ||
window.oRequestAnimationFrame ||
window.msRequestAnimationFrame ||
function(callback, element) {
window.setTimeout( callback, 1000 / 60 );
};
} )();
}
var kNPoints = 8000;
var kNModifications = 20;
var kNVisiblePoints = 200;
var kDecaySpeed = 20;
var kPointRadius = 4;
var kInitialLifeForce = 100;
var livePoints = void 0;
var dyingPoints = void 0;
var scene = void 0;
var renderingStartTime = void 0;
var scene = void 0;
var pausePlot = void 0;
var splayTree = void 0;
function Point(x, y, z, payload) {
this.x = x;
this.y = y;
this.z = z;
this.next = null;
this.prev = null;
this.payload = payload;
this.lifeForce = kInitialLifeForce;
}
Point.prototype.color = function () {
return "rgba(0, 0, 0, " + (this.lifeForce / kInitialLifeForce) + ")";
};
Point.prototype.decay = function () {
this.lifeForce -= kDecaySpeed;
return this.lifeForce <= 0;
};
function PointsList() {
this.head = null;
this.count = 0;
}
PointsList.prototype.add = function (point) {
if (this.head !== null) this.head.prev = point;
point.next = this.head;
this.head = point;
this.count++;
}
PointsList.prototype.remove = function (point) {
if (point.next !== null) {
point.next.prev = point.prev;
}
if (point.prev !== null) {
point.prev.next = point.next;
} else {
this.head = point.next;
}
this.count--;
}
function GeneratePayloadTree(depth, tag) {
if (depth == 0) {
return {
array : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ],
string : 'String for key ' + tag + ' in leaf node'
};
} else {
return {
left: GeneratePayloadTree(depth - 1, tag),
right: GeneratePayloadTree(depth - 1, tag)
};
}
}
// To make the benchmark results predictable, we replace Math.random
// with a 100% deterministic alternative.
Math.random = (function() {
var seed = 49734321;
return function() {
// Robert Jenkins' 32 bit integer hash function.
seed = ((seed + 0x7ed55d16) + (seed << 12)) & 0xffffffff;
seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff;
seed = ((seed + 0xd3a2646c) ^ (seed << 9)) & 0xffffffff;
seed = ((seed + 0xfd7046c5) + (seed << 3)) & 0xffffffff;
seed = ((seed ^ 0xb55a4f09) ^ (seed >>> 16)) & 0xffffffff;
return (seed & 0xfffffff) / 0x10000000;
};
})();
function GenerateKey() {
// The benchmark framework guarantees that Math.random is
// deterministic; see base.js.
return Math.random();
}
function CreateNewPoint() {
// Insert new node with a unique key.
var key;
do { key = GenerateKey(); } while (splayTree.find(key) != null);
var point = new Point(Math.random() * 40 - 20,
Math.random() * 40 - 20,
Math.random() * 40 - 20,
GeneratePayloadTree(5, "" + key));
livePoints.add(point);
splayTree.insert(key, point);
return key;
}
function ModifyPointsSet() {
if (livePoints.count < kNPoints) {
for (var i = 0; i < kNModifications; i++) {
CreateNewPoint();
}
} else if (kNModifications === 20) {
kNModifications = 80;
kDecay = 30;
}
for (var i = 0; i < kNModifications; i++) {
var key = CreateNewPoint();
var greatest = splayTree.findGreatestLessThan(key);
if (greatest == null) {
var point = splayTree.remove(key).value;
} else {
var point = splayTree.remove(greatest.key).value;
}
livePoints.remove(point);
point.payload = null;
dyingPoints.add(point);
}
}
function PausePlot(width, height, size) {
var canvas = document.createElement("canvas");
canvas.width = this.width = width;
canvas.height = this.height = height;
document.body.appendChild(canvas);
this.ctx = canvas.getContext('2d');
this.maxPause = 0;
this.size = size;
// Initialize cyclic buffer for pauses.
this.pauses = new Array(this.size);
this.start = this.size;
this.idx = 0;
}
PausePlot.prototype.addPause = function (p) {
if (this.idx === this.size) {
this.idx = 0;
}
if (this.idx === this.start) {
this.start++;
}
if (this.start === this.size) {
this.start = 0;
}
this.pauses[this.idx++] = p;
};
PausePlot.prototype.iteratePauses = function (f) {
if (this.start < this.idx) {
for (var i = this.start; i < this.idx; i++) {
f.call(this, i - this.start, this.pauses[i]);
}
} else {
for (var i = this.start; i < this.size; i++) {
f.call(this, i - this.start, this.pauses[i]);
}
var offs = this.size - this.start;
for (var i = 0; i < this.idx; i++) {
f.call(this, i + offs, this.pauses[i]);
}
}
};
PausePlot.prototype.draw = function () {
var first = null;
this.iteratePauses(function (i, v) {
if (first === null) {
first = v;
}
this.maxPause = Math.max(v, this.maxPause);
});
var dx = this.width / this.size;
var dy = this.height / this.maxPause;
this.ctx.save();
this.ctx.clearRect(0, 0, 480, 240);
this.ctx.beginPath();
this.ctx.moveTo(1, dy * this.pauses[this.start]);
var p = first;
this.iteratePauses(function (i, v) {
var delta = v - p;
var x = 1 + dx * i;
var y = dy * v;
this.ctx.lineTo(x, y);
if (delta > 2 * (p / 3)) {
this.ctx.font = "bold 12px sans-serif";
this.ctx.textBaseline = "bottom";
this.ctx.fillText(v + "ms", x + 2, y);
}
p = v;
});
this.ctx.strokeStyle = "black";
this.ctx.stroke();
this.ctx.restore();
}
function Scene(width, height) {
var canvas = document.createElement("canvas");
canvas.width = width;
canvas.height = height;
document.body.appendChild(canvas);
this.ctx = canvas.getContext('2d');
this.width = canvas.width;
this.height = canvas.height;
// Projection configuration.
this.x0 = canvas.width / 2;
this.y0 = canvas.height / 2;
this.z0 = 100;
this.f = 1000; // Focal length.
// Camera is rotating around y-axis.
this.angle = 0;
}
Scene.prototype.drawPoint = function (x, y, z, color) {
// Rotate the camera around y-axis.
var rx = x * Math.cos(this.angle) - z * Math.sin(this.angle);
var ry = y;
var rz = x * Math.sin(this.angle) + z * Math.cos(this.angle);
// Perform perspective projection.
var px = (this.f * rx) / (rz - this.z0) + this.x0;
var py = (this.f * ry) / (rz - this.z0) + this.y0;
this.ctx.save();
this.ctx.fillStyle = color
this.ctx.beginPath();
this.ctx.arc(px, py, kPointRadius, 0, 2 * Math.PI, true);
this.ctx.fill();
this.ctx.restore();
};
Scene.prototype.drawDyingPoints = function () {
var point_next = null;
for (var point = dyingPoints.head; point !== null; point = point_next) {
// Rotate the scene around y-axis.
scene.drawPoint(point.x, point.y, point.z, point.color());
point_next = point.next;
// Decay the current point and remove it from the list
// if it's life-force ran out.
if (point.decay()) {
dyingPoints.remove(point);
}
}
};
Scene.prototype.draw = function () {
this.ctx.save();
this.ctx.clearRect(0, 0, this.width, this.height);
this.drawDyingPoints();
this.ctx.restore();
this.angle += Math.PI / 90.0;
};
function render() {
if (typeof renderingStartTime === 'undefined') {
renderingStartTime = Date.now();
}
ModifyPointsSet();
scene.draw();
var renderingEndTime = Date.now();
var pause = renderingEndTime - renderingStartTime;
pausePlot.addPause(pause);
renderingStartTime = renderingEndTime;
pausePlot.draw();
div.innerHTML =
livePoints.count + "/" + dyingPoints.count + " " +
pause + "(max = " + pausePlot.maxPause + ") ms" ;
// Schedule next frame.
requestAnimationFrame(render);
}
function init() {
livePoints = new PointsList;
dyingPoints = new PointsList;
splayTree = new SplayTree();
scene = new Scene(640, 480);
div = document.createElement("div");
document.body.appendChild(div);
pausePlot = new PausePlot(480, 240, 160);
}
init();
render();

deps/v8/build/common.gypi (12)

@ -60,8 +60,6 @@
'v8_enable_disassembler%': 0,
'v8_object_print%': 0,
'v8_enable_gdbjit%': 0,
# Enable profiling support. Only required on Windows.
@ -74,7 +72,6 @@
'v8_use_snapshot%': 'true',
'host_os%': '<(OS)',
'v8_use_liveobjectlist%': 'false',
'werror%': '-Werror',
# For a shared library build, results in "libv8-<(soname_version).so".
'soname_version%': '',
@ -87,9 +84,6 @@
['v8_enable_disassembler==1', {
'defines': ['ENABLE_DISASSEMBLER',],
}],
['v8_object_print==1', {
'defines': ['OBJECT_PRINT',],
}],
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
@ -190,9 +184,6 @@
}],
],
}],
['OS=="solaris"', {
'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
}],
],
'configurations': {
'Debug': {
@ -227,7 +218,7 @@
'cflags': [ '-I/usr/local/include' ],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor' ],
}],
],
@ -270,6 +261,7 @@
}],
['OS=="win"', {
'msvs_configuration_attributes': {
'OutputDirectory': '$(SolutionDir)$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},

deps/v8/build/standalone.gypi (7)

@ -33,7 +33,6 @@
'component%': 'static_library',
'visibility%': 'hidden',
'msvs_multi_core_compile%': '1',
'variables': {
'variables': {
'variables': {
'conditions': [
@ -50,15 +49,11 @@
},
'host_arch%': '<(host_arch)',
'target_arch%': '<(host_arch)',
},
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(target_arch)',
},
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(v8_target_arch)',
'werror%': '-Werror',
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="x64" and host_arch!="x64")', {
@ -79,7 +74,7 @@
'conditions': [
[ 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
'target_defaults': {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-pthread', '-fno-rtti',
'-fno-exceptions', '-pedantic' ],
'ldflags': [ '-pthread', ],

deps/v8/include/v8-debug.h (5)

@ -339,11 +339,6 @@ class EXPORT Debug {
static bool EnableAgent(const char* name, int port,
bool wait_for_connection = false);
/**
* Disable the V8 builtin debug agent. The TCP/IP connection will be closed.
*/
static void DisableAgent();
/**
* Makes V8 process all pending debug messages.
*
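
For context, a minimal sketch of how an embedder starts the built-in TCP debug agent against the API left after this hunk: EnableAgent() remains, DisableAgent() is gone. The agent name and port below are illustrative, not taken from the diff.

#include <v8.h>
#include <v8-debug.h>

// Sketch only: with 3.6.4 there is no Debug::DisableAgent(), so once the
// agent is enabled it stays up for the life of the process.
bool StartDebugAgent() {
  // Signature as declared above: name, port, wait_for_connection.
  return v8::Debug::EnableAgent("sample-embedder", 5858, false);
}
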

deps/v8/include/v8.h (42)

@ -1171,8 +1171,7 @@ class String : public Primitive {
* Get the ExternalAsciiStringResource for an external ASCII string.
* Returns NULL if IsExternalAscii() doesn't return true.
*/
V8EXPORT const ExternalAsciiStringResource* GetExternalAsciiStringResource()
const;
V8EXPORT ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
static inline String* Cast(v8::Value* obj);
@ -2452,42 +2451,24 @@ class V8EXPORT TypeSwitch : public Data {
// --- Extensions ---
class V8EXPORT ExternalAsciiStringResourceImpl
: public String::ExternalAsciiStringResource {
public:
ExternalAsciiStringResourceImpl() : data_(0), length_(0) {}
ExternalAsciiStringResourceImpl(const char* data, size_t length)
: data_(data), length_(length) {}
const char* data() const { return data_; }
size_t length() const { return length_; }
private:
const char* data_;
size_t length_;
};
/**
* Ignore
*/
class V8EXPORT Extension { // NOLINT
public:
// Note that the strings passed into this constructor must live as long
// as the Extension itself.
Extension(const char* name,
const char* source = 0,
int dep_count = 0,
const char** deps = 0,
int source_length = -1);
const char** deps = 0);
virtual ~Extension() { }
virtual v8::Handle<v8::FunctionTemplate>
GetNativeFunction(v8::Handle<v8::String> name) {
return v8::Handle<v8::FunctionTemplate>();
}
const char* name() const { return name_; }
size_t source_length() const { return source_length_; }
const String::ExternalAsciiStringResource* source() const {
return &source_; }
const char* name() { return name_; }
const char* source() { return source_; }
int dependency_count() { return dep_count_; }
const char** dependencies() { return deps_; }
void set_auto_enable(bool value) { auto_enable_ = value; }
@ -2495,8 +2476,7 @@ class V8EXPORT Extension { // NOLINT
private:
const char* name_;
size_t source_length_; // expected to initialize before source_
ExternalAsciiStringResourceImpl source_;
const char* source_;
int dep_count_;
const char** deps_;
bool auto_enable_;
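
For context, a hedged sketch of an embedder extension written against the reverted (3.6.4-style) Extension interface in this hunk: the constructor takes no source length and source() is a plain const char*. The name "v8/sample" and the sampleGet() function are illustrative only; registration goes through the RegisterExtension() entry point implemented in api.cc below.

#include <v8.h>

class SampleExtension : public v8::Extension {
 public:
  // The source string literal outlives the Extension object, as the
  // comment in the header requires.
  SampleExtension()
      : v8::Extension("v8/sample", "native function sampleGet();") {}

  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
      v8::Handle<v8::String> name) {
    // Only one native function is declared, so no dispatch on name.
    return v8::FunctionTemplate::New(SampleGet);
  }

  static v8::Handle<v8::Value> SampleGet(const v8::Arguments& args) {
    return v8::Integer::New(42);
  }
};

void InstallSampleExtension() {
  static SampleExtension extension;
  v8::RegisterExtension(&extension);
}
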
@ -3518,9 +3498,9 @@ class V8EXPORT Context {
*
* v8::Locker is a scoped lock object. While it's
* active (i.e. between its construction and destruction) the current thread is
* allowed to use the locked isolate. V8 guarantees that an isolate can be
* locked by at most one thread at any time. In other words, the scope of a
* v8::Locker is a critical section.
* allowed to use the locked isolate. V8 guarantees that an isolate can be locked
* by at most one thread at any time. In other words, the scope of a v8::Locker is
* a critical section.
*
* Sample usage:
* \code
@ -3622,8 +3602,8 @@ class V8EXPORT Locker {
static void StopPreemption();
/**
* Returns whether or not the locker for a given isolate, or default isolate
* if NULL is given, is locked by the current thread.
* Returns whether or not the locker for a given isolate, or default isolate if NULL is given,
* is locked by the current thread.
*/
static bool IsLocked(Isolate* isolate = NULL);
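
The two hunks above only re-wrap the v8::Locker documentation; the locking pattern it describes is unchanged. A hedged sketch of that pattern:

#include <v8.h>

void WithIsolateLocked(v8::Isolate* isolate) {
  v8::Locker locker(isolate);                 // start of the critical section
  v8::Isolate::Scope isolate_scope(isolate);
  // ... use handles, compile and run scripts, etc. ...
}                                             // scopes unwind, lock released
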
@ -3789,7 +3769,7 @@ class Internals {
static const int kFullStringRepresentationMask = 0x07;
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kJSObjectType = 0xa6;
static const int kJSObjectType = 0xa3;
static const int kFirstNonstringType = 0x80;
static const int kForeignType = 0x85;

deps/v8/src/SConscript (2)

@ -84,7 +84,6 @@ SOURCES = {
hydrogen.cc
hydrogen-instructions.cc
ic.cc
incremental-marking.cc
inspector.cc
interpreter-irregexp.cc
isolate.cc
@ -134,7 +133,6 @@ SOURCES = {
v8utils.cc
variables.cc
version.cc
store-buffer.cc
zone.cc
extensions/gc-extension.cc
extensions/externalize-string-extension.cc

deps/v8/src/api.cc (121)

@ -185,10 +185,7 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
int end_marker;
heap_stats.end_marker = &end_marker;
i::Isolate* isolate = i::Isolate::Current();
// BUG(1718):
// Don't use the take_snapshot since we don't support HeapIterator here
// without doing a special GC.
isolate->heap()->RecordStats(&heap_stats, false);
isolate->heap()->RecordStats(&heap_stats, take_snapshot);
i::V8::SetFatalError();
FatalErrorCallback callback = GetFatalErrorHandler();
{
@ -504,12 +501,9 @@ void RegisterExtension(Extension* that) {
Extension::Extension(const char* name,
const char* source,
int dep_count,
const char** deps,
int source_length)
const char** deps)
: name_(name),
source_length_(source_length >= 0 ?
source_length : (source ? strlen(source) : 0)),
source_(source, source_length_),
source_(source),
dep_count_(dep_count),
deps_(deps),
auto_enable_(false) { }
@ -1413,7 +1407,7 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
ScriptData* ScriptData::PreCompile(const char* input, int length) {
i::Utf8ToUC16CharacterStream stream(
reinterpret_cast<const unsigned char*>(input), length);
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
}
@ -1422,10 +1416,10 @@ ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
if (str->IsExternalTwoByteString()) {
i::ExternalTwoByteStringUC16CharacterStream stream(
i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
} else {
i::GenericStringUC16CharacterStream stream(str, 0, str->length());
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
}
}
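
These hunks only switch which Harmony flag PreParse() consults; the public ScriptData::PreCompile() entry points are otherwise untouched. A hedged sketch of using them from an embedder (assumes a HandleScope and an entered Context; error handling omitted):

#include <string.h>
#include <v8.h>

v8::Handle<v8::Script> CompileWithPreParse(const char* source) {
  // Pre-parse once; the result can be fed back into Script::Compile.
  v8::ScriptData* pre_data =
      v8::ScriptData::PreCompile(source, static_cast<int>(strlen(source)));
  v8::Handle<v8::String> src = v8::String::New(source);
  v8::Handle<v8::Script> script =
      v8::Script::Compile(src, NULL /* origin */, pre_data);
  delete pre_data;  // the embedder owns the pre-parse data
  return script;
}
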
@ -1787,7 +1781,7 @@ v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> recv,
int argc,
i::Handle<i::Object> argv[],
i::Object** argv[],
bool* has_pending_exception) {
i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name);
@ -1804,10 +1798,10 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name,
static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> data,
bool* has_pending_exception) {
i::Handle<i::Object> argv[] = { data };
i::Object** argv[1] = { data.location() };
return CallV8HeapFunction(name,
i::Isolate::Current()->js_builtins_object(),
ARRAY_SIZE(argv),
1,
argv,
has_pending_exception);
}
@ -2627,11 +2621,10 @@ bool Value::Equals(Handle<Value> that) const {
if (obj->IsJSObject() && other->IsJSObject()) {
return *obj == *other;
}
i::Handle<i::Object> args[] = { other };
i::Object** args[1] = { other.location() };
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result =
CallV8HeapFunction("EQUALS", obj, ARRAY_SIZE(args), args,
&has_pending_exception);
CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, false);
return *result == i::Smi::FromInt(i::EQUAL);
}
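
Here too only the internal argument marshalling for the EQUALS builtin changes; the public Value::Equals() keeps its signature and JavaScript == semantics. A minimal illustration (assumes a HandleScope and an entered Context):

#include <v8.h>

bool LooselyEqual(v8::Handle<v8::Value> a, v8::Handle<v8::Value> b) {
  return a->Equals(b);  // JavaScript == semantics, may coerce operands
}
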
@ -3211,10 +3204,21 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
self,
i::JSObject::ALLOW_CREATION));
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::Handle<i::Object> result = i::SetHiddenProperty(self, key_obj, value_obj);
return *result == *self;
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::SetProperty(
hidden_props,
key_obj,
value_obj,
static_cast<PropertyAttributes>(None),
i::kNonStrictMode);
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
}
@ -3224,9 +3228,20 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
return Local<v8::Value>());
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
self,
i::JSObject::OMIT_CREATION));
if (hidden_props->IsUndefined()) {
return v8::Local<v8::Value>();
}
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> result(self->GetHiddenProperty(*key_obj));
if (result->IsUndefined()) return v8::Local<v8::Value>();
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result = i::GetProperty(hidden_props, key_obj);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, v8::Local<v8::Value>());
if (result->IsUndefined()) {
return v8::Local<v8::Value>();
}
return Utils::ToLocal(result);
}
@ -3237,9 +3252,15 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
self->DeleteHiddenProperty(*key_obj);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
self,
i::JSObject::OMIT_CREATION));
if (hidden_props->IsUndefined()) {
return true;
}
i::Handle<i::JSObject> js_obj(i::JSObject::cast(*hidden_props));
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
return i::DeleteProperty(js_obj, key_obj)->IsTrue();
}
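
These three hunks revert the hidden-property implementation to the older hidden-properties-object scheme, but the public Object::SetHiddenValue / GetHiddenValue / DeleteHiddenValue interface is the same either way. A hedged sketch of that interface from the embedder side (the key name "embedder:tag" is illustrative; assumes a HandleScope and an entered Context):

#include <v8.h>

void TagObject(v8::Handle<v8::Object> obj, v8::Handle<v8::Value> tag) {
  v8::Handle<v8::String> key = v8::String::New("embedder:tag");
  obj->SetHiddenValue(key, tag);               // invisible to script code
  v8::Local<v8::Value> stored = obj->GetHiddenValue(key);
  if (!stored.IsEmpty() && stored->StrictEquals(tag)) {
    obj->DeleteHiddenValue(key);               // returns true on success
  }
}
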
@ -3289,12 +3310,22 @@ void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
i::Handle<i::ExternalArray> array =
isolate->factory()->NewExternalArray(length, array_type, data);
// If the object already has external elements, create a new, unique
// map if the element type is now changing, because assumptions about
// generated code based on the receiver's map will be invalid.
i::Handle<i::HeapObject> elements(object->elements());
bool cant_reuse_map =
elements->map()->IsUndefined() ||
!elements->map()->has_external_array_elements() ||
elements->map() != isolate->heap()->MapForExternalArrayType(array_type);
if (cant_reuse_map) {
i::Handle<i::Map> external_array_map =
isolate->factory()->GetElementsTransitionMap(
object,
GetElementsKindFromExternalArrayType(array_type));
i::Handle<i::Map>(object->map()),
GetElementsKindFromExternalArrayType(array_type),
object->HasFastProperties());
object->set_map(*external_array_map);
}
object->set_elements(*array);
}
@ -3453,8 +3484,7 @@ bool v8::Object::IsCallable() {
}
Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
int argc,
Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc,
v8::Handle<v8::Value> argv[]) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::CallAsFunction()",
@ -3465,7 +3495,7 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
i::Object*** args = reinterpret_cast<i::Object***>(argv);
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>();
if (obj->IsJSFunction()) {
fun = i::Handle<i::JSFunction>::cast(obj);
@ -3495,7 +3525,7 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
i::Object*** args = reinterpret_cast<i::Object***>(argv);
if (obj->IsJSFunction()) {
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
EXCEPTION_PREAMBLE(isolate);
@ -3537,7 +3567,7 @@ Local<v8::Object> Function::NewInstance(int argc,
HandleScope scope;
i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
i::Object*** args = reinterpret_cast<i::Object***>(argv);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::New(function, argc, args, &has_pending_exception);
@ -3558,7 +3588,7 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
i::Object*** args = reinterpret_cast<i::Object***>(argv);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned =
i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
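
The hunks above change how api.cc marshals the argument array internally (i::Handle<i::Object>* versus i::Object***); the public Object::CallAsFunction, Function::NewInstance and Function::Call signatures are unchanged. A hedged sketch of the unchanged caller side (the property name "handler" is illustrative; assumes a HandleScope and an entered Context):

#include <v8.h>

v8::Handle<v8::Value> CallHandler(v8::Handle<v8::Object> global,
                                  v8::Handle<v8::Value> payload) {
  v8::Handle<v8::Value> fn_val = global->Get(v8::String::New("handler"));
  if (!fn_val->IsFunction()) return v8::Undefined();
  v8::Handle<v8::Function> fn = v8::Handle<v8::Function>::Cast(fn_val);
  v8::Handle<v8::Value> argv[] = { payload };  // plain Handle array, as before
  return fn->Call(global, 1, argv);            // argc, argv as marshalled above
}
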
@ -3769,11 +3799,10 @@ bool v8::String::IsExternalAscii() const {
void v8::String::VerifyExternalStringResource(
v8::String::ExternalStringResource* value) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
const v8::String::ExternalStringResource* expected;
v8::String::ExternalStringResource* expected;
if (i::StringShape(*str).IsExternalTwoByte()) {
const void* resource =
i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
expected = reinterpret_cast<const ExternalStringResource*>(resource);
void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
expected = reinterpret_cast<ExternalStringResource*>(resource);
} else {
expected = NULL;
}
@ -3781,7 +3810,7 @@ void v8::String::VerifyExternalStringResource(
}
const v8::String::ExternalAsciiStringResource*
v8::String::ExternalAsciiStringResource*
v8::String::GetExternalAsciiStringResource() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (IsDeadCheck(str->GetIsolate(),
@ -3789,9 +3818,8 @@ const v8::String::ExternalAsciiStringResource*
return NULL;
}
if (i::StringShape(*str).IsExternalAscii()) {
const void* resource =
i::Handle<i::ExternalAsciiString>::cast(str)->resource();
return reinterpret_cast<const ExternalAsciiStringResource*>(resource);
void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
return reinterpret_cast<ExternalAsciiStringResource*>(resource);
} else {
return NULL;
}
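
This hunk swaps the const-qualified accessor added in 3.7 for the non-const 3.6.4 form of GetExternalAsciiStringResource(); the call pattern is the same either way. A hedged sketch of reading an external ASCII string's backing store:

#include <v8.h>

size_t ExternalAsciiLength(v8::Handle<v8::String> str) {
  if (!str->IsExternalAscii()) return 0;
  // Non-const resource pointer, per the 3.6.4 signature restored above.
  v8::String::ExternalAsciiStringResource* resource =
      str->GetExternalAsciiStringResource();
  return resource == NULL ? 0 : resource->length();
}
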
@ -3981,7 +4009,7 @@ bool v8::V8::IdleNotification() {
void v8::V8::LowMemoryNotification() {
i::Isolate* isolate = i::Isolate::Current();
if (!isolate->IsInitialized()) return;
isolate->heap()->CollectAllAvailableGarbage();
isolate->heap()->CollectAllGarbage(true);
}
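
This hunk swaps the collector entry point used by V8::LowMemoryNotification(); the embedder-facing notification calls in the same hunk are unchanged. For illustration:

#include <v8.h>

void OnLowMemory() {
  v8::V8::LowMemoryNotification();  // hint: collect as much as possible now
}

bool OnIdle() {
  // True once V8 thinks additional idle-time notifications will not help.
  return v8::V8::IdleNotification();
}
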
@ -4500,7 +4528,6 @@ bool v8::String::MakeExternal(
bool v8::String::CanMakeExternal() {
if (!internal::FLAG_clever_optimizations) return false;
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false;
@ -5453,12 +5480,6 @@ bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) {
wait_for_connection);
}
void Debug::DisableAgent() {
return i::Isolate::Current()->debugger()->StopAgent();
}
void Debug::ProcessDebugMessages() {
i::Execution::ProcessDebugMesssages(true);
}

deps/v8/src/arm/assembler-arm-inl.h (24)

@ -77,11 +77,6 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
if (host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
@ -106,10 +101,6 @@ Object** RelocInfo::target_object_address() {
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
if (host() != NULL && target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), &Memory::Object_at(pc_), HeapObject::cast(target));
}
}
@ -140,12 +131,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
if (host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), NULL, cell);
}
}
@ -162,11 +147,6 @@ void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
@ -215,7 +195,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
visitor->VisitPointer(target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
@ -241,7 +221,7 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
StaticVisitor::VisitPointer(heap, target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {

deps/v8/src/arm/assembler-arm.cc (12)

@ -78,9 +78,7 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
void CpuFeatures::Probe() {
unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
CpuFeaturesImpliedByCompiler());
ASSERT(supported_ == 0 || supported_ == standard_features);
ASSERT(!initialized_);
#ifdef DEBUG
initialized_ = true;
#endif
@ -88,7 +86,8 @@ void CpuFeatures::Probe() {
// Get the features implied by the OS and the compiler settings. This is the
// minimal set of features which is also alowed for generated code in the
// snapshot.
supported_ |= standard_features;
supported_ |= OS::CpuFeaturesImpliedByPlatform();
supported_ |= CpuFeaturesImpliedByCompiler();
if (Serializer::enabled()) {
// No probing for features if we might serialize (generate snapshot).
@ -2506,8 +2505,7 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(pc_, rmode, data, NULL);
RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
// Adjust code for new modes.
ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@ -2539,7 +2537,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {

deps/v8/src/arm/assembler-arm.h (10)

@ -1209,10 +1209,6 @@ class Assembler : public AssemblerBase {
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
void instr_at_put(int pos, Instr instr) {
*reinterpret_cast<Instr*>(buffer_ + pos) = instr;
}
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
@ -1267,6 +1263,12 @@ class Assembler : public AssemblerBase {
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
void instr_at_put(int pos, Instr instr) {
*reinterpret_cast<Instr*>(buffer_ + pos) = instr;
}
// Decode branch instruction at pos and return branch target pos
int target_at(int pos);

deps/v8/src/arm/builtins-arm.cc (150)

@ -582,11 +582,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&convert_argument);
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ push(r0);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
}
__ LeaveInternalFrame();
__ pop(function);
__ mov(argument, r0);
__ b(&argument_is_string);
@ -602,11 +601,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// create a string wrapper.
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ push(argument);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
__ LeaveInternalFrame();
__ Ret();
}
@ -619,12 +617,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
Label slow, non_function_call;
Label non_function_call;
// Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &slow);
__ b(ne, &non_function_call);
// Jump to the function-specific construct stub.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
@ -633,19 +631,10 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
// r2: object type
Label do_call;
__ bind(&slow);
__ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function_call);
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
__ bind(&non_function_call);
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@ -661,16 +650,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Isolate* isolate = masm->isolate();
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
__ EnterConstructFrame();
// Preserve the two incoming parameters on the stack.
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ push(r0); // Smi-tagged arguments count.
__ push(r1); // Constructor function.
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
// Try to allocate the object without transitioning into C code. If any of the
// preconditions is not met, the code bails out to the runtime call.
Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
@ -690,9 +678,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
__ b(ne, &rt_call);
// Check that the constructor is not constructing a JSFunction (see
// comments in Runtime_NewObject in runtime.cc). In which case the
// initial map's instance type would be JS_FUNCTION_TYPE.
// Check that the constructor is not constructing a JSFunction (see comments
// in Runtime_NewObject in runtime.cc). In which case the initial map's
// instance type would be JS_FUNCTION_TYPE.
// r1: constructor function
// r2: initial map
__ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
@ -727,8 +715,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
// Allocated the JSObject, now initialize the fields. Map is set to initial
// map and properties and elements are set to empty fixed array.
// r1: constructor function
// r2: initial map
// r3: object size
@ -750,43 +738,39 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r5: First in-object property of JSObject (not tagged)
__ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
__ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
{ Label loop, entry;
if (count_constructions) {
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
__ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
// r0: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
__ cmp(r0, r6);
__ Assert(le, "Unexpected number of pre-allocated property fields.");
}
__ InitializeFieldsWithFiller(r5, r0, r7);
// To allow for truncation.
__ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
} else {
__ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
}
__ b(&entry);
__ bind(&loop);
__ str(r7, MemOperand(r5, kPointerSize, PostIndex));
__ bind(&entry);
__ cmp(r5, r6);
__ b(lt, &loop);
}
__ InitializeFieldsWithFiller(r5, r6, r7);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
// failures need to undo the allocation, so that the heap is in a
// consistent state and verifiable.
// Add the object tag to make the JSObject real, so that we can continue and
// jump into the continuation code at any time from now on. Any failures
// need to undo the allocation, so that the heap is in a consistent state
// and verifiable.
__ add(r4, r4, Operand(kHeapObjectTag));
// Check if a non-empty properties array is needed. Continue with
// allocated object if not fall through to runtime call if it is.
// Check if a non-empty properties array is needed. Continue with allocated
// object if not fall through to runtime call if it is.
// r1: constructor function
// r4: JSObject
// r5: start of next object (not tagged)
__ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
// The field instance sizes contains both pre-allocated property fields
// and in-object properties.
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
__ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
kBitsPerByte);
__ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
__ add(r3, r3, Operand(r6));
__ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte,
kBitsPerByte);
__ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
__ sub(r3, r3, Operand(r6), SetCC);
// Done if no extra properties are to be allocated.
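The two Ubfx variants in this hunk differ only in whether the shift and width are spelled with kBitsPerByte or a literal 8. As a quick aside for readers unfamiliar with the instruction, this is roughly what an unsigned bit-field extract computes; the byte position used below is made up for the sketch and is not Map's real layout.

#include <cstdint>
#include <iostream>

uint32_t Ubfx(uint32_t value, int lsb, int width) {
  // Extract `width` bits starting at bit `lsb`, zero-extended.
  return (value >> lsb) & ((1u << width) - 1u);
}

int main() {
  const int kPreAllocatedPropertyFieldsByte = 2;  // assumed position, for illustration only
  const int kBitsPerByte = 8;
  uint32_t instance_sizes = 0x04030201;  // four packed one-byte fields
  std::cout << Ubfx(instance_sizes,
                    kPreAllocatedPropertyFieldsByte * kBitsPerByte, 8)
            << "\n";  // prints 3
  return 0;
}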
@ -980,10 +964,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// sp[1]: constructor function
// sp[2]: number of arguments (smi-tagged)
__ ldr(r1, MemOperand(sp, 2 * kPointerSize));
// Leave construct frame.
}
__ LeaveConstructFrame();
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
@ -1016,12 +997,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r4: argv
// r5-r7, cp may be clobbered
// Clear the context before we push it when entering the internal frame.
// Clear the context before we push it when entering the JS frame.
__ mov(cp, Operand(0, RelocInfo::NONE));
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
// Set up the context from the function argument.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
@ -1070,10 +1050,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ InvokeFunction(r1, actual, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
}
// Exit the JS frame and remove the parameters (except function), and
// return.
// Respect ABI stack constraint.
}
__ LeaveInternalFrame();
__ Jump(lr);
// r0: result
@ -1092,8 +1072,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
// Preserve the function.
__ push(r1);
@ -1111,8 +1090,8 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Restore saved function.
__ pop(r1);
// Tear down internal frame.
}
// Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ Jump(r2);
@ -1121,8 +1100,7 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
// Preserve the function.
__ push(r1);
@ -1140,8 +1118,8 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Restore saved function.
__ pop(r1);
// Tear down internal frame.
}
// Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ Jump(r2);
@ -1150,13 +1128,12 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
// Pass the function and deoptimization type to the runtime system.
__ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(r0);
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
}
__ LeaveInternalFrame();
// Get the full codegen state from the stack and untag it -> r6.
__ ldr(r6, MemOperand(sp, 0 * kPointerSize));
@ -1196,10 +1173,9 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ CallRuntime(Runtime::kNotifyOSR, 0);
}
__ LeaveInternalFrame();
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
__ Ret();
}
@ -1215,11 +1191,10 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ push(r0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
__ LeaveInternalFrame();
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
@ -1301,10 +1276,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ b(ge, &shift_arguments);
__ bind(&convert_to_object);
{
// Enter an internal frame in order to preserve argument count.
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame(); // In order to preserve argument count.
__ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged.
__ push(r0);
@ -1314,10 +1286,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ pop(r0);
__ mov(r0, Operand(r0, ASR, kSmiTagSize));
// Exit the internal frame.
}
__ LeaveInternalFrame();
// Restore the function to r1, and the flag to r4.
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ mov(r4, Operand(0, RelocInfo::NONE));
@ -1437,8 +1406,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kRecvOffset = 3 * kPointerSize;
const int kFunctionOffset = 4 * kPointerSize;
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r0);
@ -1572,7 +1540,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ InvokeFunction(r1, actual, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
frame_scope.GenerateLeaveFrame();
// Tear down the internal frame and remove function, receiver and args.
__ LeaveInternalFrame();
__ add(sp, sp, Operand(3 * kPointerSize));
__ Jump(lr);
@ -1586,8 +1555,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
// Tear down the internal frame and remove function, receiver and args.
}
__ LeaveInternalFrame();
__ add(sp, sp, Operand(3 * kPointerSize));
__ Jump(lr);
}
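Most hunks in this file show the same two idioms side by side: a brace-delimited FrameScope object and explicit EnterInternalFrame()/LeaveInternalFrame() calls. A minimal standalone sketch of the pattern, with hypothetical stand-in types rather than V8's real MacroAssembler, may help when reading the hunks above.

#include <iostream>

// Hypothetical stand-ins; V8's real MacroAssembler and FrameScope differ.
struct Masm {
  void EnterInternalFrame() { std::cout << "enter internal frame\n"; }
  void LeaveInternalFrame() { std::cout << "leave internal frame\n"; }
};

class FrameScope {
 public:
  explicit FrameScope(Masm* masm) : masm_(masm) { masm_->EnterInternalFrame(); }
  ~FrameScope() { masm_->LeaveInternalFrame(); }
 private:
  Masm* masm_;
};

int main() {
  Masm masm;
  {
    // Scope style: the destructor tears the frame down at the closing brace.
    FrameScope scope(&masm);
    std::cout << "  ... emit code inside the frame ...\n";
  }
  // Explicit style, as on the other side of these hunks.
  masm.EnterInternalFrame();
  std::cout << "  ... emit code inside the frame ...\n";
  masm.LeaveInternalFrame();
  return 0;
}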

532
deps/v8/src/arm/code-stubs-arm.cc

@ -189,72 +189,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
// [sp]: function.
// [sp + kPointerSize]: serialized scope info
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
__ AllocateInNewSpace(FixedArray::SizeFor(length),
r0, r1, r2, &gc, TAG_OBJECT);
// Load the function from the stack.
__ ldr(r3, MemOperand(sp, 0));
// Load the serialized scope info from the stack.
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
// Setup the object header.
__ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
// If this block context is nested in the global context we get a smi
// sentinel instead of a function. The block context should get the
// canonical empty function of the global context as its closure which
// we still have to look up.
Label after_sentinel;
__ JumpIfNotSmi(r3, &after_sentinel);
if (FLAG_debug_code) {
const char* message = "Expected 0 as a Smi sentinel";
__ cmp(r3, Operand::Zero());
__ Assert(eq, message);
}
__ ldr(r3, GlobalObjectOperand());
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
__ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
// Setup the fixed slots.
__ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
__ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
__ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
// Copy the global object from the previous context.
__ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
__ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));
// Initialize the rest of the slots to the hole value.
__ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
for (int i = 0; i < slots_; i++) {
__ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
}
// Remove the on-stack argument and return.
__ mov(cp, r0);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
// Need to collect. Call into runtime system.
__ bind(&gc);
__ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}
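FastNewBlockContextStub builds a context as a fixed array: a handful of fixed header slots followed by the scope's locals, all initialized to the hole. The model below is illustrative only; the slot order and the MIN_CONTEXT_SLOTS value are assumptions for the sketch, not V8's actual layout constants.

#include <iostream>
#include <string>
#include <vector>

int main() {
  const int kMinContextSlots = 4;  // assumed: closure, previous, extension, global
  const int slots = 2;             // the stub's slots_ for this example
  std::vector<std::string> context(kMinContextSlots + slots);
  context[0] = "closure";
  context[1] = "previous context (cp)";
  context[2] = "extension (serialized scope info)";
  context[3] = "global object (copied from previous context)";
  for (int i = 0; i < slots; i++) {
    context[kMinContextSlots + i] = "the hole";  // locals start out holed
  }
  for (std::size_t i = 0; i < context.size(); i++) {
    std::cout << "slot " << i << ": " << context[i] << "\n";
  }
  return 0;
}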
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
@ -904,11 +838,9 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(
ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
}
// Call C routine that may not cause GC or other trouble.
__ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
0, 2);
// Store answer in the overwritable heap number. Double returned in
// registers r0 and r1 or in d0.
if (masm->use_eabi_hardfloat()) {
@ -925,29 +857,6 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
}
bool WriteInt32ToHeapNumberStub::IsPregenerated() {
// These variants are compiled ahead of time. See next method.
if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
return true;
}
if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
return true;
}
// Other register combinations are generated as and when they are needed,
// so it is unsafe to call them from stubs (we can't generate a stub while
// we are generating a stub).
return false;
}
void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
stub1.GetCode()->set_is_pregenerated(true);
stub2.GetCode()->set_is_pregenerated(true);
}
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@ -1288,8 +1197,6 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
0, 2);
__ pop(pc); // Return.
@ -1307,7 +1214,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
// FIRST_SPEC_OBJECT_TYPE.
@ -1699,8 +1606,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
// This stub uses VFP3 instructions.
CpuFeatures::Scope scope(VFP3);
@ -1808,41 +1713,6 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
}
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
__ stm(db_w, sp, kCallerSaved | lr.bit());
if (save_doubles_ == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
__ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
__ vstr(reg, MemOperand(sp, i * kDoubleSize));
}
}
const int argument_count = 1;
const int fp_argument_count = 0;
const Register scratch = r1;
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
__ mov(r0, Operand(ExternalReference::isolate_address()));
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
__ vldr(reg, MemOperand(sp, i * kDoubleSize));
}
__ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
}
__ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
}
void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
@ -1996,13 +1866,12 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ push(r0);
__ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(r1, Operand(r0));
__ pop(r0);
}
__ LeaveInternalFrame();
__ bind(&heapnumber_allocated);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
@ -2043,14 +1912,13 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ push(r0); // Push the heap number, not the untagged int32.
__ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(r2, r0); // Move the new heap number into r2.
// Get the heap number into r0, now that the new heap number is in r2.
__ pop(r0);
}
__ LeaveInternalFrame();
// Convert the heap number in r0 to an untagged integer in r1.
// This can't go slow-case because it's the same number we already
@ -2160,10 +2028,6 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
void BinaryOpStub::Generate(MacroAssembler* masm) {
// Explicitly allow generation of nested stubs. It is safe here because
// generation code does not use any raw pointers.
AllowStubCallsScope allow_stub_calls(masm, true);
switch (operands_type_) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
@ -3269,11 +3133,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
__ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ push(r0);
__ CallRuntime(RuntimeFunction(), 1);
}
__ LeaveInternalFrame();
__ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ Ret();
@ -3286,15 +3149,14 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// We return the value in d2 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
// Allocate an aligned object larger than a HeapNumber.
ASSERT(4 * kPointerSize >= HeapNumber::kSize);
__ mov(scratch0, Operand(4 * kPointerSize));
__ push(scratch0);
__ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
}
__ LeaveInternalFrame();
__ Ret();
}
}
@ -3311,7 +3173,6 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
} else {
__ vmov(r0, r1, d2);
}
AllowExternalCallThatCantCauseGC scope(masm);
switch (type_) {
case TranscendentalCache::SIN:
__ CallCFunction(ExternalReference::math_sin_double_function(isolate),
@ -3407,14 +3268,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
__ PrepareCallCFunction(1, 1, scratch);
__ SetCallCDoubleArguments(double_base, exponent);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(
ExternalReference::power_double_int_function(masm->isolate()),
1, 1);
__ pop(lr);
__ GetCFunctionDoubleResult(double_result);
}
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
@ -3440,14 +3298,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
__ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
0, 2);
__ pop(lr);
__ GetCFunctionDoubleResult(double_result);
}
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber);
@ -3464,37 +3319,6 @@ bool CEntryStub::NeedsImmovableCode() {
}
bool CEntryStub::IsPregenerated() {
return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
result_size_ == 1;
}
void CodeStub::GenerateStubsAheadOfTime() {
CEntryStub::GenerateAheadOfTime();
WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
}
void CodeStub::GenerateFPStubs() {
CEntryStub save_doubles(1, kSaveFPRegs);
Handle<Code> code = save_doubles.GetCode();
code->set_is_pregenerated(true);
StoreBufferOverflowStub stub(kSaveFPRegs);
stub.GetCode()->set_is_pregenerated(true);
code->GetIsolate()->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime() {
CEntryStub stub(1, kDontSaveFPRegs);
Handle<Code> code = stub.GetCode();
code->set_is_pregenerated(true);
}
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(r0);
}
@ -3606,7 +3430,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ b(eq, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
__ mov(r3, Operand(isolate->factory()->the_hole_value()));
__ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
__ ldr(r3, MemOperand(ip));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(ip));
@ -3644,7 +3469,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ sub(r6, r6, Operand(kPointerSize));
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
// Setup argc and the builtin function in callee-saved registers.
@ -3789,7 +3613,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// saved values before returning a failure to C.
// Clear any pending exceptions.
__ mov(r5, Operand(isolate->factory()->the_hole_value()));
__ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
__ ldr(r5, MemOperand(ip));
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r5, MemOperand(ip));
@ -4026,11 +3851,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ Push(r0, r1);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
}
__ LeaveInternalFrame();
__ cmp(r0, Operand::Zero());
__ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
@ -4426,6 +4250,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
if (!FLAG_regexp_entry_native) {
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
return;
}
// Stack frame on entry.
// sp[0]: last_match_info (expected JSArray)
@ -4652,7 +4480,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
__ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1));
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
@ -4703,7 +4532,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
__ mov(r1, Operand(isolate->factory()->the_hole_value()));
__ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
__ ldr(r1, MemOperand(r1, 0));
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(r2, 0));
@ -4745,25 +4575,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ str(r2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
__ mov(r3, last_match_info_elements); // Moved up to reduce latency.
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
__ mov(r2, subject);
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastSubjectOffset,
r2,
r7,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
__ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastInputOffset,
subject,
r7,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
__ mov(r3, last_match_info_elements);
__ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@ -4891,22 +4712,6 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
void CallFunctionStub::FinishCode(Code* code) {
code->set_has_function_cache(false);
}
void CallFunctionStub::Clear(Heap* heap, Address address) {
UNREACHABLE();
}
Object* CallFunctionStub::GetCachedValue(Address address) {
UNREACHABLE();
return NULL;
}
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow, non_function;
@ -5084,26 +4889,23 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ cmp(result_, Operand(ip));
__ b(ne, &call_runtime_);
// Get the first of the two strings and load its instance type.
__ ldr(result_, FieldMemOperand(object_, ConsString::kFirstOffset));
__ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
__ jmp(&assure_seq_string);
// SlicedString, unpack and add offset.
__ bind(&sliced_string);
__ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
__ add(scratch_, scratch_, result_);
__ ldr(result_, FieldMemOperand(object_, SlicedString::kParentOffset));
__ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Assure that we are dealing with a sequential string. Go to runtime if not.
__ bind(&assure_seq_string);
__ ldr(result_, FieldMemOperand(result_, HeapObject::kMapOffset));
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// Check that parent is not an external string. Go to runtime otherwise.
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(result_, Operand(kStringRepresentationMask));
__ b(ne, &call_runtime_);
// Actually fetch the parent string if it is confirmed to be sequential.
STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset);
__ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
// Check for 1-byte or 2-byte string.
__ bind(&flat_string);
@ -6623,13 +6425,12 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Call the runtime system in a fresh internal frame.
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ Push(r1, r0);
__ mov(ip, Operand(Smi::FromInt(op_)));
__ push(ip);
__ CallExternalReference(miss, 3);
}
__ LeaveInternalFrame();
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@ -6812,8 +6613,6 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
// Registers:
// result: StringDictionary to probe
// r1: key
@ -6903,267 +6702,6 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
struct AheadOfTimeWriteBarrierStubList {
Register object, value, address;
RememberedSetAction action;
};
struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
{ r6, r4, r7, EMIT_REMEMBERED_SET },
{ r6, r2, r7, EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
{ r3, r4, r5, EMIT_REMEMBERED_SET },
// Used in CompileStoreGlobal.
{ r4, r1, r2, OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
{ r1, r2, r3, EMIT_REMEMBERED_SET },
{ r3, r2, r1, EMIT_REMEMBERED_SET },
// Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
{ r2, r1, r3, EMIT_REMEMBERED_SET },
{ r3, r1, r2, EMIT_REMEMBERED_SET },
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ r4, r2, r3, EMIT_REMEMBERED_SET },
// Null termination.
{ no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
};
bool RecordWriteStub::IsPregenerated() {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
if (object_.is(entry->object) &&
value_.is(entry->value) &&
address_.is(entry->address) &&
remembered_set_action_ == entry->action &&
save_fp_regs_mode_ == kDontSaveFPRegs) {
return true;
}
}
return false;
}
bool StoreBufferOverflowStub::IsPregenerated() {
return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
}
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
stub1.GetCode()->set_is_pregenerated(true);
}
void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
RecordWriteStub stub(entry->object,
entry->value,
entry->address,
entry->action,
kDontSaveFPRegs);
stub.GetCode()->set_is_pregenerated(true);
}
}
// Takes the input in 3 registers: address_ value_ and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
// written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
Label skip_to_incremental_noncompacting;
Label skip_to_incremental_compacting;
// The first two instructions are generated with labels so as to get the
// offset fixed up correctly by the bind(Label*) call. We patch it back and
// forth between a compare instructions (a nop in this position) and the
// real branch when we start and stop incremental heap marking.
// See RecordWriteStub::Patch for details.
__ b(&skip_to_incremental_noncompacting);
__ b(&skip_to_incremental_compacting);
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
__ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
}
__ Ret();
__ bind(&skip_to_incremental_noncompacting);
GenerateIncremental(masm, INCREMENTAL);
__ bind(&skip_to_incremental_compacting);
GenerateIncremental(masm, INCREMENTAL_COMPACTION);
// Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
// Will be checked in IncrementalMarking::ActivateGeneratedStub.
ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
PatchBranchIntoNop(masm, 0);
PatchBranchIntoNop(masm, Assembler::kInstrSize);
}
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.Save(masm);
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
__ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
__ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
regs_.scratch0(),
&dont_need_remembered_set);
__ CheckPageFlag(regs_.object(),
regs_.scratch0(),
1 << MemoryChunk::SCAN_ON_SCAVENGE,
ne,
&dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm, mode);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
__ bind(&dont_need_remembered_set);
}
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm, mode);
regs_.Restore(masm);
__ Ret();
}
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
Register address =
r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
ASSERT(!address.is(regs_.object()));
ASSERT(!address.is(r0));
__ Move(address, regs_.address());
__ Move(r0, regs_.object());
if (mode == INCREMENTAL_COMPACTION) {
__ Move(r1, address);
} else {
ASSERT(mode == INCREMENTAL);
__ ldr(r1, MemOperand(address, 0));
}
__ mov(r2, Operand(ExternalReference::isolate_address()));
AllowExternalCallThatCantCauseGC scope(masm);
if (mode == INCREMENTAL_COMPACTION) {
__ CallCFunction(
ExternalReference::incremental_evacuation_record_write_function(
masm->isolate()),
argument_count);
} else {
ASSERT(mode == INCREMENTAL);
__ CallCFunction(
ExternalReference::incremental_marking_record_write_function(
masm->isolate()),
argument_count);
}
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
Label on_black;
Label need_incremental;
Label need_incremental_pop_scratch;
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
__ bind(&on_black);
// Get the value from the slot.
__ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
__ CheckPageFlag(regs_.scratch0(), // Contains value.
regs_.scratch1(), // Scratch.
MemoryChunk::kEvacuationCandidateMask,
eq,
&ensure_not_white);
__ CheckPageFlag(regs_.object(),
regs_.scratch1(), // Scratch.
MemoryChunk::kSkipEvacuationSlotsRecordingMask,
eq,
&need_incremental);
__ bind(&ensure_not_white);
}
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.object(), regs_.address());
__ EnsureNotWhite(regs_.scratch0(), // The value.
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
regs_.address(), // Scratch.
&need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
__ bind(&need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
__ bind(&need_incremental);
// Fall through when we need to inform the incremental marker.
}
#undef __
} } // namespace v8::internal
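Much of what this commit strips out of code-stubs-arm.cc (RecordWriteStub, StoreBufferOverflowStub, the ahead-of-time stub table) exists to maintain the remembered set for the newer garbage collector. The following is only a conceptual sketch of the invariant those stubs protect, using plain C++ containers rather than anything V8 actually does: stores of new-space pointers into old-space objects must be recorded so a scavenge can find them.

#include <iostream>
#include <set>

enum Space { kNewSpace, kOldSpace };

struct Object {
  Space space;
  Object* field;
};

// Slots in old-space objects that currently hold new-space pointers.
std::set<Object**> remembered_set;

void WriteField(Object* holder, Object** slot, Object* value) {
  *slot = value;
  // The "write barrier": only old->new stores need a remembered-set entry.
  if (holder->space == kOldSpace && value->space == kNewSpace) {
    remembered_set.insert(slot);
  }
}

int main() {
  Object young = {kNewSpace, nullptr};
  Object old_object = {kOldSpace, nullptr};
  WriteField(&old_object, &old_object.field, &young);
  std::cout << "remembered slots: " << remembered_set.size() << "\n";  // 1
  return 0;
}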

245
deps/v8/src/arm/code-stubs-arm.h

@ -58,25 +58,6 @@ class TranscendentalCacheStub: public CodeStub {
};
class StoreBufferOverflowStub: public CodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
private:
SaveFPRegsMode save_doubles_;
Major MajorKey() { return StoreBufferOverflow; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@ -342,9 +323,6 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
the_heap_number_(the_heap_number),
scratch_(scratch) { }
bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
private:
Register the_int_;
Register the_heap_number_;
@ -393,225 +371,6 @@ class NumberToStringStub: public CodeStub {
};
class RecordWriteStub: public CodeStub {
public:
RecordWriteStub(Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
}
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
INCREMENTAL_COMPACTION
};
virtual bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
}
static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
}
static Mode GetMode(Code* stub) {
Instr first_instruction = Assembler::instr_at(stub->instruction_start());
Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
Assembler::kInstrSize);
if (Assembler::IsBranch(first_instruction)) {
return INCREMENTAL;
}
ASSERT(Assembler::IsTstImmediate(first_instruction));
if (Assembler::IsBranch(second_instruction)) {
return INCREMENTAL_COMPACTION;
}
ASSERT(Assembler::IsTstImmediate(second_instruction));
return STORE_BUFFER_ONLY;
}
static void Patch(Code* stub, Mode mode) {
MacroAssembler masm(NULL,
stub->instruction_start(),
stub->instruction_size());
switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
PatchBranchIntoNop(&masm, 0);
PatchBranchIntoNop(&masm, Assembler::kInstrSize);
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, 0);
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, Assembler::kInstrSize);
break;
}
ASSERT(GetMode(stub) == mode);
CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
}
private:
// This is a helper class for freeing up 3 scratch registers. The input is
// two registers that must be preserved and one scratch register provided by
// the caller.
class RegisterAllocation {
public:
RegisterAllocation(Register object,
Register address,
Register scratch0)
: object_(object),
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
masm->push(scratch1_);
}
void Restore(MacroAssembler* masm) {
masm->pop(scratch1_);
}
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved. The scratch registers
// will be restored by other means so we don't bother pushing them here.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
masm->sub(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
// Save all VFP registers except d0.
for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
}
}
}
inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
// Restore all VFP registers except d0.
for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
}
masm->add(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
}
masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
}
inline Register object() { return object_; }
inline Register address() { return address_; }
inline Register scratch0() { return scratch0_; }
inline Register scratch1() { return scratch1_; }
private:
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
Register GetRegThatIsNotOneOf(Register r1,
Register r2,
Register r3) {
for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
if (candidate.is(r3)) continue;
return candidate;
}
UNREACHABLE();
return no_reg;
}
friend class RecordWriteStub;
};
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
void Generate(MacroAssembler* masm);
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
Major MajorKey() { return RecordWrite; }
int MinorKey() {
return ObjectBits::encode(object_.code()) |
ValueBits::encode(value_.code()) |
AddressBits::encode(address_.code()) |
RememberedSetActionBits::encode(remembered_set_action_) |
SaveFPRegsModeBits::encode(save_fp_regs_mode_);
}
bool MustBeInStubCache() {
// All stubs must be registered in the stub cache
// otherwise IncrementalMarker would not be able to find
// and patch it.
return true;
}
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
class ObjectBits: public BitField<int, 0, 4> {};
class ValueBits: public BitField<int, 4, 4> {};
class AddressBits: public BitField<int, 8, 4> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
Register object_;
Register value_;
Register address_;
RememberedSetAction remembered_set_action_;
SaveFPRegsMode save_fp_regs_mode_;
Label slow_;
RegisterAllocation regs_;
};
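MinorKey() above packs the stub's register codes and flags into a single integer via BitField<>. A simplified stand-in for that template (same encode/decode idea, not V8's exact definition):

#include <cassert>

// Simplified stand-in for v8::internal::BitField<T, shift, size>.
template <class T, int shift, int size>
struct BitField {
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned packed) {
    return static_cast<T>((packed >> shift) & ((1u << size) - 1u));
  }
};

typedef BitField<int, 0, 4> ObjectBits;   // register code of object_
typedef BitField<int, 4, 4> ValueBits;    // register code of value_
typedef BitField<int, 8, 4> AddressBits;  // register code of address_

int main() {
  unsigned key =
      ObjectBits::encode(3) | ValueBits::encode(5) | AddressBits::encode(9);
  assert(ObjectBits::decode(key) == 3);
  assert(ValueBits::decode(key) == 5);
  assert(AddressBits::decode(key) == 9);
  return 0;
}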
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
@ -816,8 +575,6 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@ -830,7 +587,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
Major MajorKey() { return StringDictionaryLookup; }
Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
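GetMode() and Patch() in the RecordWriteStub declaration above switch the stub between write-barrier modes by rewriting its first two instructions between a branch and a nop. The sketch below models only that bookkeeping, with fake instruction words instead of real ARM encodings.

#include <cassert>

enum Instr { kNop, kBranch };  // fake instruction words, not ARM encodings
enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

Mode GetMode(const Instr stub[2]) {
  if (stub[0] == kBranch) return INCREMENTAL;
  if (stub[1] == kBranch) return INCREMENTAL_COMPACTION;
  return STORE_BUFFER_ONLY;  // both slots are nops
}

void Patch(Instr stub[2], Mode mode) {
  stub[0] = (mode == INCREMENTAL) ? kBranch : kNop;
  stub[1] = (mode == INCREMENTAL_COMPACTION) ? kBranch : kNop;
  assert(GetMode(stub) == mode);
}

int main() {
  Instr stub[2] = {kNop, kNop};  // freshly generated stub: store buffer only
  assert(GetMode(stub) == STORE_BUFFER_ONLY);
  Patch(stub, INCREMENTAL);
  assert(GetMode(stub) == INCREMENTAL);
  Patch(stub, STORE_BUFFER_ONLY);
  assert(GetMode(stub) == STORE_BUFFER_ONLY);
  return 0;
}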

8
deps/v8/src/arm/codegen-arm.cc

@ -38,16 +38,12 @@ namespace internal {
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
ASSERT(!masm->has_frame());
masm->set_has_frame(true);
masm->EnterInternalFrame();
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
ASSERT(masm->has_frame());
masm->set_has_frame(false);
masm->LeaveInternalFrame();
}

10
deps/v8/src/arm/codegen-arm.h

@ -69,6 +69,16 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
// Constants related to patching of inlined load/store.
static int GetInlinedKeyedLoadInstructionsAfterPatch() {
return FLAG_debug_code ? 32 : 13;
}
static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
static int GetInlinedNamedStoreInstructionsAfterPatch() {
ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
return Isolate::Current()->inlined_write_barrier_size() + 4;
}
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};

6
deps/v8/src/arm/debug-arm.cc

@ -132,8 +132,7 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
@ -181,8 +180,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
}
}
// Leave the internal frame.
}
__ LeaveInternalFrame();
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was

28
deps/v8/src/arm/deoptimizer-arm.cc

@ -112,19 +112,12 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
#endif
Isolate* isolate = code->GetIsolate();
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
DeoptimizerData* data = isolate->deoptimizer_data();
DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
// We might be in the middle of incremental marking with compaction.
// Tell collector to treat this code object in a special way and
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@ -141,8 +134,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
@ -177,13 +169,6 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
reinterpret_cast<uint32_t>(check_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
RelocInfo rinfo(pc_after - 2 * kInstrSize,
RelocInfo::CODE_TARGET,
0,
unoptimized_code);
unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
unoptimized_code, &rinfo, replacement_code);
}
@ -208,9 +193,6 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
reinterpret_cast<uint32_t>(replacement_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(check_code->entry());
check_code->GetHeap()->incremental_marking()->
RecordCodeTargetPatch(pc_after - 2 * kInstrSize, check_code);
}
@ -650,10 +632,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(r5, Operand(ExternalReference::isolate_address()));
__ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
}
// Preserve "deoptimizer" object in register r0 and get the input
// frame descriptor pointer to r1 (deoptimizer->input_);
@ -707,11 +686,8 @@ void Deoptimizer::EntryGenerator::Generate() {
// r0: deoptimizer object; r1: scratch.
__ PrepareCallCFunction(1, r1);
// Call Deoptimizer::ComputeOutputFrames().
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate), 1);
}
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
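The DeoptimizeFunction hunk above keeps the list insertion while dropping the incremental-marking call: each deoptimized code object is prepended to a per-isolate singly linked list. A stand-in version of that insertion (the types are placeholders, and the sketch leaks its nodes, which is fine for illustration):

#include <iostream>

struct CodeListNode {
  int code_id;  // stand-in for the Code* payload
  CodeListNode* next;
};

struct DeoptimizerData {
  CodeListNode* deoptimizing_code_list_;
};

void AddDeoptimizingCode(DeoptimizerData* data, int code_id) {
  CodeListNode* node = new CodeListNode;       // never freed; fine for a sketch
  node->code_id = code_id;
  node->next = data->deoptimizing_code_list_;  // node->set_next(old head)
  data->deoptimizing_code_list_ = node;        // node becomes the new head
}

int main() {
  DeoptimizerData data = {nullptr};
  AddDeoptimizingCode(&data, 1);
  AddDeoptimizingCode(&data, 2);
  for (CodeListNode* n = data.deoptimizing_code_list_; n != nullptr; n = n->next)
    std::cout << "code " << n->code_id << "\n";  // prints 2, then 1
  return 0;
}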

10
deps/v8/src/arm/frames-arm.h

@ -70,16 +70,6 @@ static const RegList kCalleeSaved =
1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
// When calling into C++ (only for C++ calls that can't cause a GC).
// The call code will take care of lr, fp, etc.
static const RegList kCallerSaved =
1 << 0 | // r0
1 << 1 | // r1
1 << 2 | // r2
1 << 3 | // r3
1 << 9; // r9
static const int kNumCalleeSaved = 7 + kR9Available;
// Double registers d8 to d15 are callee-saved.
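RegList values like kCalleeSaved and the kCallerSaved list shown in this hunk are plain bitmasks with one bit per core register; stm/ldm-style save and restore sequences walk the set bits. A small illustration (the register split mirrors the list above but is otherwise not tied to V8):

#include <cstdint>
#include <iostream>

typedef uint32_t RegList;  // one bit per core register, as in frames-arm.h

const RegList kExampleCallerSaved =
    1 << 0 |  // r0
    1 << 1 |  // r1
    1 << 2 |  // r2
    1 << 3 |  // r3
    1 << 9;   // r9

int main() {
  int saved = 0;
  for (int reg = 0; reg < 16; reg++) {
    if (kExampleCallerSaved & (1u << reg)) {
      std::cout << "push r" << reg << "\n";  // what an stm over this list does
      saved++;
    }
  }
  std::cout << saved << " registers saved\n";  // prints 5
  return 0;
}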

227
deps/v8/src/arm/full-codegen-arm.cc

@ -39,7 +39,6 @@
#include "stub-cache.h"
#include "arm/code-stubs-arm.h"
#include "arm/macro-assembler-arm.h"
namespace v8 {
namespace internal {
@ -156,11 +155,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok);
}
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
int locals_count = info->scope()->num_stack_slots();
__ Push(lr, fp, cp, r1);
@ -206,12 +200,13 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
MemOperand target = ContextOperand(cp, var->index());
__ str(r0, target);
// Update the write barrier.
__ RecordWriteContextSlot(
cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
__ mov(r1, Operand(Context::SlotOffset(var->index())));
__ str(r0, MemOperand(cp, r1));
// Update the write barrier. This clobbers all involved
// registers, so we have to use two more registers to avoid
// clobbering cp.
__ mov(r2, Operand(cp));
__ RecordWrite(r2, Operand(r1), r3, r0);
}
}
}
@ -269,7 +264,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
EmitDeclaration(scope()->function(), CONST, NULL, &ignored);
EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@ -670,15 +665,12 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
__ str(src, location);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
__ RecordWriteContextSlot(scratch0,
location.offset(),
src,
__ RecordWrite(scratch0,
Operand(Context::SlotOffset(var->index())),
scratch1,
kLRHasBeenSaved,
kDontSaveFPRegs);
src);
}
}
@ -711,7 +703,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
VariableMode mode,
Variable::Mode mode,
FunctionLiteral* function,
int* global_count) {
// If it was not possible to allocate the variable at compile time, we
@ -729,7 +721,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ str(result_register(), StackOperand(variable));
} else if (mode == CONST || mode == LET) {
} else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, StackOperand(variable));
@ -754,16 +746,10 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ str(result_register(), ContextOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp,
offset,
result_register(),
r2,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ mov(r1, Operand(cp));
__ RecordWrite(r1, Operand(offset), r2, result_register());
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
} else if (mode == CONST || mode == LET) {
} else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ str(ip, ContextOperand(cp, variable->index()));
@ -776,8 +762,10 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
__ mov(r2, Operand(variable->name()));
// Declaration nodes are always introduced in one of three modes.
ASSERT(mode == VAR || mode == CONST || mode == LET);
PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE;
ASSERT(mode == Variable::VAR ||
mode == Variable::CONST ||
mode == Variable::LET);
PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
__ mov(r1, Operand(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@ -787,7 +775,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ Push(cp, r2, r1);
// Push initial value for function declaration.
VisitForStackValue(function);
} else if (mode == CONST || mode == LET) {
} else if (mode == Variable::CONST || mode == Variable::LET) {
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
__ Push(cp, r2, r1, r0);
} else {
@ -1217,23 +1205,15 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
if (var->mode() == DYNAMIC_GLOBAL) {
if (var->mode() == Variable::DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
__ jmp(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
} else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
if (local->mode() == CONST ||
local->mode() == LET) {
if (local->mode() == Variable::CONST) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
if (local->mode() == CONST) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
} else { // LET
__ b(ne, done);
__ mov(r0, Operand(var->name()));
__ push(r0);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
}
}
__ jmp(done);
}
@ -1266,13 +1246,13 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
if (var->mode() != LET && var->mode() != CONST) {
if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
context()->Plug(var);
} else {
// Let and const need a read barrier.
GetVar(r0, var);
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
if (var->mode() == LET) {
if (var->mode() == Variable::LET) {
Label done;
__ b(ne, &done);
__ mov(r0, Operand(var->name()));
@ -1510,23 +1490,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
// Store the subexpression value in the array's elements.
__ ldr(r6, MemOperand(sp)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
__ ldr(r1, MemOperand(sp)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ str(result_register(), FieldMemOperand(r1, offset));
Label no_map_change;
__ JumpIfSmi(result_register(), &no_map_change);
// Update the write barrier for the array store with r0 as the scratch
// register.
__ RecordWriteField(
r1, offset, result_register(), r2, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ CheckFastSmiOnlyElements(r3, r2, &no_map_change);
__ push(r6); // Copy of array literal.
__ CallRuntime(Runtime::kNonSmiElementStored, 1);
__ bind(&no_map_change);
__ RecordWrite(r1, Operand(offset), r2, result_register());
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@ -1873,7 +1844,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
} else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
__ push(r0); // Value.
@ -1898,12 +1869,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// RecordWrite may destroy all its register arguments.
__ mov(r3, result_register());
int offset = Context::SlotOffset(var->index());
__ RecordWriteContextSlot(
r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
__ RecordWrite(r1, Operand(offset), r2, r3);
}
}
} else if (var->mode() != CONST) {
} else if (var->mode() != Variable::CONST) {
// Assignment to var or initializing assignment to let.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, r1);
@ -1917,9 +1887,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ str(r0, location);
if (var->IsContextSlot()) {
__ mov(r3, r0);
int offset = Context::SlotOffset(var->index());
__ RecordWriteContextSlot(
r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
__ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3);
}
} else {
ASSERT(var->IsLookupSlot());
@ -2139,8 +2107,10 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
__ push(r1);
// Push the strict mode flag. In harmony mode every eval call
// is a strict mode eval call.
StrictModeFlag strict_mode =
FLAG_harmony_scoping ? kStrictMode : strict_mode_flag();
StrictModeFlag strict_mode = strict_mode_flag();
if (FLAG_harmony_block_scoping) {
strict_mode = kStrictMode;
}
__ mov(r1, Operand(Smi::FromInt(strict_mode)));
__ push(r1);
@ -2186,7 +2156,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// context lookup in the runtime system.
Label done;
Variable* var = proxy->var();
if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
Label slow;
EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
// Push the function and resolve eval.
@ -2692,24 +2662,20 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
// Assume that there are only two callable types, and one of them is at
// either end of the type range for JS object types. Saves extra comparisons.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
// Map is now in r0.
__ b(lt, &null);
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
FIRST_SPEC_OBJECT_TYPE + 1);
__ b(eq, &function);
__ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
LAST_SPEC_OBJECT_TYPE - 1);
__ b(eq, &function);
// Assume that there is no larger type.
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
// As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
// FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
// LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
__ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
__ b(ge, &function);
// Check if the constructor in the map is a function.
__ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
__ b(ne, &non_function_constructor);
@ -2887,9 +2853,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
__ mov(r2, r0);
__ RecordWriteField(
r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
__ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
__ bind(&done);
context()->Plug(r0);
@ -3177,31 +3141,16 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ str(scratch1, MemOperand(index2, 0));
__ str(scratch2, MemOperand(index1, 0));
Label no_remembered_set;
__ CheckPageFlag(elements,
scratch1,
1 << MemoryChunk::SCAN_ON_SCAVENGE,
ne,
&no_remembered_set);
Label new_space;
__ InNewSpace(elements, scratch1, eq, &new_space);
// Possible optimization: do a check that both values are Smis
// (OR them together and test against the Smi mask.)
// We are swapping two objects in an array and the incremental marker never
// pauses in the middle of scanning a single object. Therefore the
// incremental marker is not disturbed, so we don't need to call the
// RecordWrite stub that notifies the incremental marker.
__ RememberedSetHelper(elements,
index1,
scratch2,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ RememberedSetHelper(elements,
index2,
scratch2,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ mov(scratch1, elements);
__ RecordWriteHelper(elements, index1, scratch2);
__ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements.
__ bind(&no_remembered_set);
__ bind(&new_space);
// We are done. Drop elements from the stack, and return undefined.
__ Drop(3);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@ -3949,14 +3898,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Handle<String> check) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
Handle<String> check,
Label* if_true,
Label* if_false,
Label* fall_through) {
{ AccumulatorValueContext context(this);
VisitForTypeofValue(expr);
}
@ -3997,11 +3942,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(r0, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
__ b(eq, if_true);
__ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
Split(eq, if_true, if_false, fall_through);
__ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
Split(ge, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(r0, if_false);
if (!FLAG_harmony_typeof) {
@ -4020,7 +3963,18 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
Label* if_true,
Label* if_false,
Label* fall_through) {
VisitForAccumulatorValue(expr);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
Split(eq, if_true, if_false, fall_through);
}
@ -4028,12 +3982,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
if (TryLiteralCompare(expr)) return;
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@ -4041,6 +3992,13 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
@ -4127,9 +4085,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil) {
void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
Comment cmnt(masm_, "[ CompareToNull");
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@ -4137,21 +4094,15 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
VisitForAccumulatorValue(expr->expression());
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex;
__ LoadRoot(r1, nil_value);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
if (expr->op() == Token::EQ_STRICT) {
if (expr->is_strict()) {
Split(eq, if_true, if_false, fall_through);
} else {
Heap::RootListIndex other_nil_value = nil == kNullValue ?
Heap::kUndefinedValueRootIndex :
Heap::kNullValueRootIndex;
__ b(eq, if_true);
__ LoadRoot(r1, other_nil_value);
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ cmp(r0, r1);
__ b(eq, if_true);
__ JumpIfSmi(r0, if_false);
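Most of the full-codegen hunks above are the same mechanical swap for the downgrade: the incremental-marking write-barrier helpers (RecordWriteContextSlot / RecordWriteField with explicit kLRHasBeenSaved / kDontSaveFPRegs arguments) disappear and the older four-argument RecordWrite comes back. A rough side-by-side of the two call shapes, lifted from the context-slot hunk above purely for illustration:

// Removed (3.7): raw slot offset plus explicit link-register / FP-save state.
int offset = Context::SlotOffset(var->index());
__ RecordWriteContextSlot(
    r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);

// Restored (3.6.4): object register, byte offset as an Operand, and two
// scratch registers; the helper marks the page region dirty itself.
__ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3);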

123 deps/v8/src/arm/ic-arm.cc

@ -208,8 +208,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value);
__ RecordWrite(
elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ RecordWrite(elements, scratch2, scratch1);
}
@ -505,8 +504,7 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack.
__ ldr(r3, MemOperand(sp, argc * kPointerSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
// Push the receiver and the name of the function.
__ Push(r3, r2);
@ -520,7 +518,7 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Move result to r1 and leave the internal frame.
__ mov(r1, Operand(r0));
}
__ LeaveInternalFrame();
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
@ -652,13 +650,12 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ push(r2); // save the key
__ Push(r1, r2); // pass the receiver and the key
__ CallRuntime(Runtime::kKeyedGetProperty, 2);
__ pop(r2); // restore the key
}
__ LeaveInternalFrame();
__ mov(r1, r0);
__ jmp(&do_call);
@ -911,8 +908,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
__ str(r0, mapped_location);
__ add(r6, r3, r5);
__ mov(r9, r0);
__ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ RecordWrite(r3, r6, r9);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in r3.
@ -920,8 +916,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
__ str(r0, unmapped_location);
__ add(r6, r3, r4);
__ mov(r9, r0);
__ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ RecordWrite(r3, r6, r9);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
@ -1272,17 +1267,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- r2 : receiver
// -- lr : return address
// -----------------------------------
Label slow, array, extra, check_if_double_array;
Label fast_object_with_map_check, fast_object_without_map_check;
Label fast_double_with_map_check, fast_double_without_map_check;
Label slow, fast, array, extra;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
Register elements = r3; // Elements array of the receiver.
Register elements_map = r6;
Register receiver_map = r7;
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
@ -1290,26 +1281,35 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
__ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check if the object is a JS array or not.
__ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ cmp(r4, Operand(JS_ARRAY_TYPE));
__ b(eq, &array);
// Check that the object is some kind of JSObject.
__ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
__ b(lt, &slow);
__ cmp(r4, Operand(JS_PROXY_TYPE));
__ b(eq, &slow);
__ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(eq, &slow);
// Object case: Check key against length in the elements array.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode and writable.
__ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
__ b(ne, &slow);
// Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
__ b(lo, &fast_object_with_map_check);
__ b(lo, &fast);
// Slow case, handle jump to runtime.
__ bind(&slow);
@ -1330,31 +1330,21 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip));
__ b(hs, &slow);
__ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ cmp(elements_map,
Operand(masm->isolate()->factory()->fixed_array_map()));
__ b(ne, &check_if_double_array);
// Calculate key + 1 as smi.
STATIC_ASSERT(kSmiTag == 0);
__ add(r4, key, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ b(&fast_object_without_map_check);
__ bind(&check_if_double_array);
__ cmp(elements_map,
Operand(masm->isolate()->factory()->fixed_double_array_map()));
__ b(ne, &slow);
// Add 1 to key, and go to common element store code for doubles.
STATIC_ASSERT(kSmiTag == 0);
__ add(r4, key, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ jmp(&fast_double_without_map_check);
__ b(&fast);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
// is, the length is always a smi.
__ bind(&array);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
__ b(ne, &slow);
// Check the key against the length in the array.
__ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
@ -1362,57 +1352,18 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ b(hs, &extra);
// Fall through to fast case.
__ bind(&fast_object_with_map_check);
Register scratch_value = r4;
Register address = r5;
__ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ cmp(elements_map,
Operand(masm->isolate()->factory()->fixed_array_map()));
__ b(ne, &fast_double_with_map_check);
__ bind(&fast_object_without_map_check);
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(value, &non_smi_value);
// It's irrelevant whether array is smi-only or not when writing a smi.
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value, MemOperand(address));
__ Ret();
__ bind(&non_smi_value);
// Escape to slow case when writing non-smi into smi-only array.
__ CheckFastObjectElements(receiver_map, scratch_value, &slow);
// Fast elements array, store the value to the elements backing store.
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value, MemOperand(address));
__ bind(&fast);
// Fast case, store the value to the elements backing store.
__ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value, MemOperand(r5));
// Skip write barrier if the written value is a smi.
__ tst(value, Operand(kSmiTagMask));
__ Ret(eq);
// Update write barrier for the elements array address.
__ mov(scratch_value, value); // Preserve the value which is returned.
__ RecordWrite(elements,
address,
scratch_value,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Ret();
__ sub(r4, r5, Operand(elements));
__ RecordWrite(elements, Operand(r4), r5, r6);
__ bind(&fast_double_with_map_check);
// Check for fast double array case. If this fails, call through to the
// runtime.
__ cmp(elements_map,
Operand(masm->isolate()->factory()->fixed_double_array_map()));
__ b(ne, &slow);
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
receiver,
elements,
r4,
r5,
r6,
r7,
&slow);
__ Ret();
}
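The KeyedStoreIC::GenerateGeneric rewrite is the bulk of this file: the newer code kept separate fast paths for plain, smi-only and double element kinds (fast_object_with_map_check, fast_double_with_map_check, StoreNumberToDoubleElements), whereas 3.6.4 has a single &fast label that only handles a FixedArray backing store. Roughly, the restored fast path reads as below (reassembled from the hunk above, for illustration only):

__ bind(&fast);
// Compute the slot address inside the FixedArray backing store.
__ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value, MemOperand(r5));
// Smis never need a write barrier.
__ tst(value, Operand(kSmiTagMask));
__ Ret(eq);
// Otherwise mark the region that now holds the pointer as dirty.
__ sub(r4, r5, Operand(elements));
__ RecordWrite(elements, Operand(r4), r5, r6);
__ Ret();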

36 deps/v8/src/arm/lithium-arm.cc

@ -212,11 +212,10 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
stream->Add(kind() == kStrictEquality ? " === " : " == ");
stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(is_strict() ? " === null" : " == null");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@ -712,9 +711,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
instr->set_environment(CreateEnvironment(hydrogen_env,
&argument_index_accumulator));
instr->set_environment(CreateEnvironment(hydrogen_env));
return instr;
}
@ -997,13 +994,10 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
int* argument_index_accumulator) {
LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (hydrogen_env == NULL) return NULL;
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@ -1013,6 +1007,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
argument_count_,
value_count,
outer);
int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@ -1021,7 +1016,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
op = new LArgument((*argument_index_accumulator)++);
op = new LArgument(argument_index++);
} else {
op = UseAny(value);
}
@ -1449,9 +1444,9 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new LIsNilAndBranch(UseRegisterAtStart(instr->value()));
return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
}
@ -1739,7 +1734,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new LLoadGlobalCell;
return instr->RequiresHoleCheck()
return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
@ -1753,11 +1748,14 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
if (instr->check_hole_value()) {
LOperand* temp = TempRegister();
LOperand* value = UseTempRegister(instr->value());
LInstruction* result = new LStoreGlobalCell(value, temp);
if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
return result;
LOperand* value = UseRegister(instr->value());
return AssignEnvironment(new LStoreGlobalCell(value, temp));
} else {
LOperand* value = UseRegisterAtStart(instr->value());
return new LStoreGlobalCell(value, NULL);
}
}
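In the Lithium builder, environment construction loses its accumulator parameter: 3.7 threaded an argument-index accumulator through nested environments, while 3.6.4 restarts the index at zero for every environment. The two shapes, as they appear in the hunks above (illustrative only):

// Removed (3.7): outer and inner environments share one running index.
LEnvironment* LChunkBuilder::CreateEnvironment(
    HEnvironment* hydrogen_env,
    int* argument_index_accumulator);

// Restored (3.6.4): a fresh argument_index per environment.
LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env);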

16 deps/v8/src/arm/lithium-arm.h

@ -107,7 +107,7 @@ class LCodeGen;
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
V(IsNullAndBranch) \
V(IsObjectAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@ -627,17 +627,16 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
class LIsNilAndBranch: public LControlInstruction<1, 0> {
class LIsNullAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsNilAndBranch(LOperand* value) {
explicit LIsNullAndBranch(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
EqualityKind kind() const { return hydrogen()->kind(); }
NilValue nil() const { return hydrogen()->nil(); }
bool is_strict() const { return hydrogen()->is_strict(); }
virtual void PrintDataTo(StringStream* stream);
};
@ -2160,8 +2159,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
void VisitInstruction(HInstruction* current);
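The header mirrors the same reversion at the instruction level; the clearest case is the nil comparison, where the generalized LIsNilAndBranch (which can compare against either null or undefined and carries both an equality kind and a nil value) reverts to the null-only LIsNullAndBranch. The accessor surface shrinks accordingly (copied from the hunk above for illustration):

// Removed (3.7): which nil value and which equality kind are parameters.
EqualityKind kind() const { return hydrogen()->kind(); }
NilValue nil() const { return hydrogen()->nil(); }

// Restored (3.6.4): only strictness matters; == null also matches undefined.
bool is_strict() const { return hydrogen()->is_strict(); }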

176 deps/v8/src/arm/lithium-codegen-arm.cc

@ -82,14 +82,6 @@ bool LCodeGen::GenerateCode() {
status_ = GENERATING;
CpuFeatures::Scope scope1(VFP3);
CpuFeatures::Scope scope2(ARMv7);
CodeStub::GenerateFPStubs();
// Open a frame scope to indicate that there is a frame on the stack. The
// NONE indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::NONE);
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@ -214,11 +206,13 @@ bool LCodeGen::GeneratePrologue() {
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
MemOperand target = ContextOperand(cp, var->index());
__ str(r0, target);
// Update the write barrier. This clobbers r3 and r0.
__ RecordWriteContextSlot(
cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
__ mov(r1, Operand(Context::SlotOffset(var->index())));
__ str(r0, MemOperand(cp, r1));
// Update the write barrier. This clobbers all involved
// registers, so we have to use two more registers to avoid
// clobbering cp.
__ mov(r2, Operand(cp));
__ RecordWrite(r2, Operand(r1), r3, r0);
}
}
Comment(";;; End allocate local context");
@ -268,9 +262,6 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
__ jmp(code->exit());
}
@ -748,7 +739,7 @@ void LCodeGen::RecordSafepoint(
int deoptimization_index) {
ASSERT(expected_safepoint_kind_ == kind);
const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
@ -1041,7 +1032,6 @@ void LCodeGen::DoDivI(LDivI* instr) {
virtual void Generate() {
codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
}
virtual LInstruction* instr() { return instr_; }
private:
LDivI* instr_;
};
@ -1753,35 +1743,25 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register scratch = scratch0();
Register reg = ToRegister(instr->InputAt(0));
int false_block = chunk_->LookupDestination(instr->false_block_id());
// If the expression is known to be untagged or a smi, then it's definitely
// not null, and it can't be an undetectable object.
if (instr->hydrogen()->representation().IsSpecialization() ||
instr->hydrogen()->type().IsSmi()) {
EmitGoto(false_block);
return;
}
// TODO(fsc): If the expression is known to be a smi, then it's
// definitely not null. Jump to the false block.
int true_block = chunk_->LookupDestination(instr->true_block_id());
Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex;
__ LoadRoot(ip, nil_value);
int false_block = chunk_->LookupDestination(instr->false_block_id());
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(reg, ip);
if (instr->kind() == kStrictEquality) {
if (instr->is_strict()) {
EmitBranch(true_block, false_block, eq);
} else {
Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
Heap::kUndefinedValueRootIndex :
Heap::kNullValueRootIndex;
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ b(eq, true_label);
__ LoadRoot(ip, other_nil_value);
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(reg, ip);
__ b(eq, true_label);
__ JumpIfSmi(reg, false_label);
@ -1938,36 +1918,28 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
__ JumpIfSmi(input, is_false);
if (class_name->IsEqualTo(CStrVector("Function"))) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
FIRST_SPEC_OBJECT_TYPE + 1);
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
LAST_SPEC_OBJECT_TYPE - 1);
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, is_false);
__ b(eq, is_true);
__ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
__ b(eq, is_true);
// Map is now in temp.
// Functions have class 'Function'.
__ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
__ b(ge, is_true);
} else {
// Faster code path to avoid two compares: subtract lower bound from the
// actual type and do a signed compare with the width of the type range.
__ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
__ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
__ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ b(gt, is_false);
__ b(ge, is_false);
}
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
// As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
// FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
// LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
if (class_name->IsEqualTo(CStrVector("Object"))) {
@ -2044,8 +2016,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
virtual void Generate() {
codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
}
virtual LInstruction* instr() { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
@ -2207,7 +2180,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
__ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
if (instr->hydrogen()->check_hole_value()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
DeoptimizeIf(eq, instr->environment());
@ -2230,7 +2203,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
Register scratch2 = ToRegister(instr->TempAt(0));
// Load the cell.
__ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
@ -2239,7 +2211,8 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted.
if (instr->hydrogen()->RequiresHoleCheck()) {
if (instr->hydrogen()->check_hole_value()) {
Register scratch2 = ToRegister(instr->TempAt(0));
__ ldr(scratch2,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@ -2249,15 +2222,6 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value.
__ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
// Cells are always in the remembered set.
__ RecordWriteField(scratch,
JSGlobalPropertyCell::kValueOffset,
value,
scratch2,
kLRHasBeenSaved,
kSaveFPRegs,
OMIT_REMEMBERED_SET);
}
@ -2283,15 +2247,10 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
MemOperand target = ContextOperand(context, instr->slot_index());
__ str(value, target);
__ str(value, ContextOperand(context, instr->slot_index()));
if (instr->needs_write_barrier()) {
__ RecordWriteContextSlot(context,
target.offset(),
value,
scratch0(),
kLRHasBeenSaved,
kSaveFPRegs);
int offset = Context::SlotOffset(instr->slot_index());
__ RecordWrite(context, Operand(offset), value, scratch0());
}
}
@ -2541,9 +2500,13 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}
if (instr->hydrogen()->RequiresHoleCheck()) {
// TODO(danno): If no hole check is required, there is no need to allocate
// elements into a temporary register; instead, scratch can be used.
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
__ vldr(result, elements, 0);
}
@ -2614,7 +2577,6 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -2944,7 +2906,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
virtual LInstruction* instr() { return instr_; }
private:
LUnaryMathOperation* instr_;
};
@ -3241,7 +3202,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Drop(1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -3301,8 +3262,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ str(value, FieldMemOperand(object, offset));
if (instr->needs_write_barrier()) {
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(
object, offset, value, scratch, kLRHasBeenSaved, kSaveFPRegs);
__ RecordWrite(object, Operand(offset), value, scratch);
}
} else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
@ -3310,8 +3270,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
__ RecordWriteField(
scratch, offset, value, object, kLRHasBeenSaved, kSaveFPRegs);
__ RecordWrite(scratch, Operand(offset), value, object);
}
}
}
@ -3342,13 +3301,6 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Register scratch = scratch0();
// This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
// conversion, so it deopts in that case.
if (instr->hydrogen()->ValueNeedsSmiCheck()) {
__ tst(value, Operand(kSmiTagMask));
DeoptimizeIf(ne, instr->environment());
}
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@ -3363,8 +3315,8 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Compute address of modified element and store it into key register.
__ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ RecordWrite(elements, key, value, kLRHasBeenSaved, kSaveFPRegs);
__ add(key, scratch, Operand(FixedArray::kHeaderSize));
__ RecordWrite(elements, key, value);
}
}
@ -3465,7 +3417,6 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3501,7 +3452,6 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@ -3625,7 +3575,6 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStringCharFromCode* instr_;
};
@ -3697,7 +3646,6 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
};
@ -3763,7 +3711,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagD* instr_;
};
@ -3872,6 +3819,16 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
class DeferredTaggedToI: public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
private:
LTaggedToI* instr_;
};
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(instr->InputAt(0));
Register scratch1 = scratch0();
@ -3954,16 +3911,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
class DeferredTaggedToI: public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LTaggedToI* instr_;
};
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@ -4396,12 +4343,10 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = ne;
} else if (type_name->Equals(heap()->function_symbol())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
__ b(eq, true_label);
__ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
final_branch_condition = eq;
__ CompareObjectType(input, input, scratch,
FIRST_CALLABLE_SPEC_OBJECT_TYPE);
final_branch_condition = ge;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
@ -4523,7 +4468,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};
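In the Lithium code generator the typeof / class-of checks move in the same direction as full-codegen: the newer code compared JS_FUNCTION_TYPE and JS_FUNCTION_PROXY_TYPE separately (two callable types), while 3.6.4 relies on the callable types sitting at the top of the instance-type range and issues one range compare. The restored typeof "function" test, pulled from the EmitTypeofIs hunk above (illustrative):

// typeof x == "function" in 3.6.4: a single range check suffices, because
// every callable instance type is >= FIRST_CALLABLE_SPEC_OBJECT_TYPE.
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, input, scratch,
                     FIRST_CALLABLE_SPEC_OBJECT_TYPE);
final_branch_condition = ge;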

7 deps/v8/src/arm/lithium-codegen-arm.h

@ -376,20 +376,16 @@ class LCodeGen BASE_EMBEDDED {
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
: codegen_(codegen), external_exit_(NULL) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@ -400,7 +396,6 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
int instruction_index_;
};
} } // namespace v8::internal
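This small header change explains the deleted lines in every deferred-code class above: 3.6.4's LDeferredCode has no instr() accessor and no instruction_index(), so the per-instruction ";;; Deferred code @%d" comments and the "virtual LInstruction* instr()" overrides all go away. The reduced base class, as the hunk above shows it (abridged for illustration):

class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen), external_exit_(NULL) {
    codegen->AddDeferredCode(this);
  }
  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;   // note: no virtual LInstruction* instr()
  Label* entry() { return &entry_; }
  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
  // ...
};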

560 deps/v8/src/arm/macro-assembler-arm.cc

@ -42,8 +42,7 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
allow_stub_calls_(true),
has_frame_(false) {
allow_stub_calls_(true) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
@ -407,6 +406,32 @@ void MacroAssembler::StoreRoot(Register source,
}
void MacroAssembler::RecordWriteHelper(Register object,
Register address,
Register scratch) {
if (emit_debug_code()) {
// Check that the object is not in new space.
Label not_in_new_space;
InNewSpace(object, scratch, ne, &not_in_new_space);
Abort("new-space object passed to RecordWriteHelper");
bind(&not_in_new_space);
}
// Calculate page address.
Bfc(object, 0, kPageSizeBits);
// Calculate region number.
Ubfx(address, address, Page::kRegionSizeLog2,
kPageSizeBits - Page::kRegionSizeLog2);
// Mark region dirty.
ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
orr(scratch, scratch, Operand(ip, LSL, address));
str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
@ -418,52 +443,38 @@ void MacroAssembler::InNewSpace(Register object,
}
void MacroAssembler::RecordWriteField(
Register object,
int offset,
Register value,
Register dst,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
// Will clobber 4 registers: object, offset, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
Operand offset,
Register scratch0,
Register scratch1) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
// Skip barrier if writing a smi.
if (smi_check == INLINE_SMI_CHECK) {
JumpIfSmi(value, &done);
}
Label done;
// Although the object register is tagged, the offset is relative to the start
// of the object, so the offset must be a multiple of kPointerSize.
ASSERT(IsAligned(offset, kPointerSize));
// First, test that the object is not in the new space. We cannot set
// region marks for new space pages.
InNewSpace(object, scratch0, eq, &done);
add(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
tst(dst, Operand((1 << kPointerSizeLog2) - 1));
b(eq, &ok);
stop("Unaligned cell in write barrier");
bind(&ok);
}
// Add offset into the object.
add(scratch0, object, offset);
RecordWrite(object,
dst,
value,
lr_status,
save_fp,
remembered_set_action,
OMIT_SMI_CHECK);
// Record the actual write.
RecordWriteHelper(object, scratch0, scratch1);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
mov(object, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
}
}
@ -473,94 +484,29 @@ void MacroAssembler::RecordWriteField(
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
Register address,
Register value,
LinkRegisterStatus lr_status,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
Register scratch) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
ASSERT(!address.is(cp) && !value.is(cp));
ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
Label done;
if (smi_check == INLINE_SMI_CHECK) {
ASSERT_EQ(0, kSmiTag);
tst(value, Operand(kSmiTagMask));
b(eq, &done);
}
CheckPageFlag(value,
value, // Used as scratch.
MemoryChunk::kPointersToHereAreInterestingMask,
eq,
&done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
eq,
&done);
// First, test that the object is not in the new space. We cannot set
// region marks for new space pages.
InNewSpace(object, scratch, eq, &done);
// Record the actual write.
if (lr_status == kLRHasNotBeenSaved) {
push(lr);
}
RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
CallStub(&stub);
if (lr_status == kLRHasNotBeenSaved) {
pop(lr);
}
RecordWriteHelper(object, address, scratch);
bind(&done);
// Clobber clobbered registers when running with the debug-code flag
// Clobber all input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
}
}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address,
Register scratch,
SaveFPRegsMode fp_mode,
RememberedSetFinalAction and_then) {
Label done;
if (FLAG_debug_code) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok);
stop("Remembered set pointer is in new space");
bind(&ok);
}
// Load store buffer top.
ExternalReference store_buffer =
ExternalReference::store_buffer_top(isolate());
mov(ip, Operand(store_buffer));
ldr(scratch, MemOperand(ip));
// Store pointer to buffer and increment buffer top.
str(address, MemOperand(scratch, kPointerSize, PostIndex));
// Write back new top of buffer.
str(scratch, MemOperand(ip));
// Call stub on end of buffer.
// Check for end of buffer.
tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
if (and_then == kFallThroughAtEnd) {
b(eq, &done);
} else {
ASSERT(and_then == kReturnAtEnd);
Ret(eq);
}
push(lr);
StoreBufferOverflowStub store_buffer_overflow =
StoreBufferOverflowStub(fp_mode);
CallStub(&store_buffer_overflow);
pop(lr);
bind(&done);
if (and_then == kReturnAtEnd) {
Ret();
mov(object, Operand(BitCast<int32_t>(kZapValue)));
mov(address, Operand(BitCast<int32_t>(kZapValue)));
mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
}
}
@ -1015,9 +961,6 @@ void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
@ -1045,9 +988,6 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
RelocInfo::Mode rmode,
InvokeFlag flag,
CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done;
InvokePrologue(expected, actual, code, no_reg, &done, flag,
@ -1071,9 +1011,6 @@ void MacroAssembler::InvokeFunction(Register fun,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1));
@ -1098,9 +1035,6 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag,
CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
ASSERT(function->is_compiled());
// Get the function and setup the context.
@ -1156,10 +1090,10 @@ void MacroAssembler::IsObjectJSStringType(Register object,
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
ASSERT(allow_stub_calls());
mov(r0, Operand(0, RelocInfo::NONE));
mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
#endif
@ -1859,127 +1793,13 @@ void MacroAssembler::CompareRoot(Register obj,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 0);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
b(ls, fail);
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
void MacroAssembler::CheckFastSmiOnlyElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
b(hi, fail);
}
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* fail) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
// Handle smi values specially.
JumpIfSmi(value_reg, &smi_value);
// Ensure that the object is a heap number
CheckMap(value_reg,
scratch1,
isolate()->factory()->heap_number_map(),
fail,
DONT_DO_SMI_CHECK);
// Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
// in the exponent.
mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
cmp(exponent_reg, scratch1);
b(ge, &maybe_nan);
ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
bind(&have_double_value);
add(scratch1, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
str(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
bind(&maybe_nan);
// Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
// it's an Infinity, and the non-NaN code path applies.
b(gt, &is_nan);
ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
cmp(mantissa_reg, Operand(0));
b(eq, &have_double_value);
bind(&is_nan);
// Load canonical NaN for storing into the double array.
uint64_t nan_int64 = BitCast<uint64_t>(
FixedDoubleArray::canonical_not_the_hole_nan_as_double());
mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
jmp(&have_double_value);
bind(&smi_value);
add(scratch1, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
add(scratch1, scratch1,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
// scratch1 is now effective address of the double element
FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(VFP3)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
Register untagged_value = receiver_reg;
SmiUntag(untagged_value, value_reg);
FloatingPointHelper::ConvertIntToDouble(this,
untagged_value,
destination,
d0,
mantissa_reg,
exponent_reg,
scratch4,
s2);
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
vstr(d0, scratch1, 0);
} else {
str(mantissa_reg, MemOperand(scratch1, 0));
str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
}
bind(&done);
}
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
@ -2075,13 +1895,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
}
MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
@ -2093,12 +1913,13 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
@ -2201,12 +2022,6 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
}
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
}
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
add(sp, sp, Operand(num_arguments * kPointerSize));
@ -2602,7 +2417,8 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate())));
CEntryStub stub(1, kSaveFPRegs);
CEntryStub stub(1);
stub.SaveDoubles();
CallStub(&stub);
}
@ -2675,9 +2491,6 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
GetBuiltinEntry(r2, id);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(r2));
@ -2809,20 +2622,14 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg);
}
#endif
// Disable stub call restrictions to always allow calls to abort.
AllowStubCallsScope allow_scope(this, true);
mov(r0, Operand(p0));
push(r0);
mov(r0, Operand(Smi::FromInt(p1 - p0)));
push(r0);
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
CallRuntime(Runtime::kAbort, 2);
} else {
CallRuntime(Runtime::kAbort, 2);
}
// will not return here
if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of
@ -3123,19 +2930,6 @@ void MacroAssembler::CopyBytes(Register src,
}
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
Register end_offset,
Register filler) {
Label loop, entry;
b(&entry);
bind(&loop);
str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
bind(&entry);
cmp(start_offset, end_offset);
b(lt, &loop);
}
void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
Register source, // Input.
Register scratch) {
@ -3295,15 +3089,23 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
mov(ip, Operand(function));
CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
CallCFunctionHelper(no_reg,
function,
ip,
num_reg_arguments,
num_double_arguments);
}
void MacroAssembler::CallCFunction(Register function,
Register scratch,
int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
CallCFunctionHelper(function,
ExternalReference::the_hole_value_location(isolate()),
scratch,
num_reg_arguments,
num_double_arguments);
}
@ -3314,15 +3116,17 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function,
Register scratch,
int num_arguments) {
CallCFunction(function, num_arguments, 0);
CallCFunction(function, scratch, num_arguments, 0);
}
void MacroAssembler::CallCFunctionHelper(Register function,
ExternalReference function_reference,
Register scratch,
int num_reg_arguments,
int num_double_arguments) {
ASSERT(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
@ -3346,6 +3150,10 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
if (function.is(no_reg)) {
mov(scratch, Operand(function_reference));
function = scratch;
}
Call(function);
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
@ -3377,185 +3185,6 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
}
void MacroAssembler::CheckPageFlag(
Register object,
Register scratch,
int mask,
Condition cc,
Label* condition_met) {
and_(scratch, object, Operand(~Page::kPageAlignmentMask));
ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
tst(scratch, Operand(mask));
b(cc, condition_met);
}
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
}
void MacroAssembler::HasColor(Register object,
Register bitmap_scratch,
Register mask_scratch,
Label* has_color,
int first_bit,
int second_bit) {
ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
GetMarkBits(object, bitmap_scratch, mask_scratch);
Label other_color, word_boundary;
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
tst(ip, Operand(mask_scratch));
b(first_bit == 1 ? eq : ne, &other_color);
// Shift left 1 by adding.
add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
b(eq, &word_boundary);
tst(ip, Operand(mask_scratch));
b(second_bit == 1 ? ne : eq, has_color);
jmp(&other_color);
bind(&word_boundary);
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
tst(ip, Operand(1));
b(second_bit == 1 ? ne : eq, has_color);
bind(&other_color);
}
// Detect some, but not all, common pointer-free objects. This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(Register value,
Register scratch,
Label* not_data_object) {
Label is_data_object;
ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
b(eq, &is_data_object);
ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
b(ne, not_data_object);
bind(&is_data_object);
}
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
mov(ip, Operand(1));
mov(mask_reg, Operand(ip, LSL, mask_reg));
}
void MacroAssembler::EnsureNotWhite(
Register value,
Register bitmap_scratch,
Register mask_scratch,
Register load_scratch,
Label* value_is_white_and_not_data) {
ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
Label done;
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
tst(mask_scratch, load_scratch);
b(ne, &done);
if (FLAG_debug_code) {
// Check for impossible bit pattern.
Label ok;
// LSL may overflow, making the check conservative.
tst(load_scratch, Operand(mask_scratch, LSL, 1));
b(eq, &ok);
stop("Impossible marking bit pattern");
bind(&ok);
}
// Value is white. We check whether it is data that doesn't need scanning.
// Currently only checks for HeapNumber and non-cons strings.
Register map = load_scratch; // Holds map while checking type.
Register length = load_scratch; // Holds length of object after testing type.
Label is_data_object;
// Check for heap-number
ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
CompareRoot(map, Heap::kHeapNumberMapRootIndex);
mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
b(eq, &is_data_object);
// Check for strings.
ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons or sliced string then it's an object
// containing no GC pointers.
Register instance_type = load_scratch;
ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
b(ne, value_is_white_and_not_data);
// It's a non-indirect (non-cons and non-slice) string.
// If it's external, the length is just ExternalString::kSize.
// Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
// External strings are the only ones with the kExternalStringTag bit
// set.
ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
tst(instance_type, Operand(kExternalStringTag));
mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
b(ne, &is_data_object);
// Sequential string, either ASCII or UC16.
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
ldr(ip, FieldMemOperand(value, String::kLengthOffset));
tst(instance_type, Operand(kStringEncodingMask));
mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
and_(length, length, Operand(~kObjectAlignmentMask));
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
orr(ip, ip, Operand(mask_scratch));
str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
add(ip, ip, Operand(length));
str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
bind(&done);
}
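The length computation for sequential strings above relies on the smi tag doubling as the times-two factor for two-byte strings. A worked sketch with assumed constants (1-bit smi tag, 8-byte object alignment, illustrative header size, not V8's):

#include <cstdint>
#include <cstdio>

const uint32_t kSmiTagSize          = 1;
const uint32_t kObjectAlignmentMask = 7;    // 8-byte alignment
const uint32_t kSeqStringHeaderSize = 12;   // map + length + hash (example)

uint32_t SeqStringSize(uint32_t smi_length, bool is_ascii) {
  // smi_length is the length field as stored: length << kSmiTagSize.
  // ASCII: one byte per char, so drop the tag. Two-byte: keep the tag,
  // which conveniently multiplies the length by 2.
  uint32_t byte_length = is_ascii ? (smi_length >> kSmiTagSize) : smi_length;
  // Add the header and round up to the object alignment.
  return (byte_length + kSeqStringHeaderSize + kObjectAlignmentMask) &
         ~kObjectAlignmentMask;
}

int main() {
  std::printf("%u\n", SeqStringSize(5u << kSmiTagSize, true));   // 5 ASCII chars
  std::printf("%u\n", SeqStringSize(5u << kSmiTagSize, false));  // 5 UC16 chars
  return 0;
}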
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
Usat(output_reg, 8, Operand(input_reg));
}
@ -3605,17 +3234,6 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
}
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
if (r1.is(r4)) return true;
if (r2.is(r3)) return true;
if (r2.is(r4)) return true;
if (r3.is(r4)) return true;
return false;
}
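AreAliased is a hand-unrolled pairwise comparison of four registers; the same idea for an arbitrary count, sketched over plain register codes rather than Register values:

#include <cstddef>
#include <cassert>

// Returns true if any two of the given register codes are equal.
bool AnyAliased(const int* reg_codes, size_t count) {
  for (size_t i = 0; i < count; ++i)
    for (size_t j = i + 1; j < count; ++j)
      if (reg_codes[i] == reg_codes[j]) return true;
  return false;
}

int main() {
  int regs[] = {0, 1, 2, 1};
  assert(AnyAliased(regs, 4));
  return 0;
}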
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),

222
deps/v8/src/arm/macro-assembler-arm.h

@ -29,7 +29,6 @@
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "assembler.h"
#include "frames.h"
#include "v8globals.h"
namespace v8 {
@ -80,14 +79,6 @@ enum ObjectToDoubleFlags {
};
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@ -166,126 +157,40 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond = al);
// ---------------------------------------------------------------------------
// GC Support
void IncrementalMarkingRecordWriteHelper(Register object,
Register value,
Register address);
enum RememberedSetFinalAction {
kReturnAtEnd,
kFallThroughAtEnd
};
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr,
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
void CheckPageFlag(Register object,
Register scratch,
int mask,
Condition cc,
Label* condition_met);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
// Check if object is in new space.
// scratch can be object itself, but it will be clobbered.
void InNewSpace(Register object,
Register scratch,
Label* branch) {
InNewSpace(object, scratch, ne, branch);
}
Condition cond, // eq for new space, ne otherwise
Label* branch);
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfInNewSpace(Register object,
Register scratch,
Label* branch) {
InNewSpace(object, scratch, eq, branch);
}
// Check if an object has a given incremental marking color.
void HasColor(Register object,
Register scratch0,
Register scratch1,
Label* has_color,
int first_bit,
int second_bit);
// For the page containing |object| mark the region covering [address]
// dirty. The object address must be in the first 8K of an allocated page.
void RecordWriteHelper(Register object,
Register address,
Register scratch);
void JumpIfBlack(Register object,
// For the page containing |object| mark the region covering
// [object+offset] dirty. The object address must be in the first 8K
// of an allocated page. The 'scratch' registers are used in the
// implementation and all 3 registers are clobbered by the
// operation, as well as the ip register. RecordWrite updates the
// write barrier even when storing smis.
void RecordWrite(Register object,
Operand offset,
Register scratch0,
Register scratch1,
Label* on_black);
// Checks the color of an object. If the object is already grey or black
// then we just fall through, since it is already live. If it is white and
// we can determine that it doesn't need to be scanned, then we just mark it
// black and fall through. For the rest we jump to the label so the
// incremental marker can fix its assumptions.
void EnsureNotWhite(Register object,
Register scratch1,
Register scratch2,
Register scratch3,
Label* object_is_white_and_not_data);
// Detects conservatively whether an object is data-only, i.e. it does not
// need to be scanned by the garbage collector.
void JumpIfDataObject(Register value,
Register scratch,
Label* not_data_object);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
void RecordWriteField(
Register object,
int offset,
Register value,
Register scratch,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
inline void RecordWriteContextSlot(
Register context,
int offset,
Register value,
Register scratch,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK) {
RecordWriteField(context,
offset + kHeapObjectTag,
value,
scratch,
lr_status,
save_fp,
remembered_set_action,
smi_check);
}
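The offset + kHeapObjectTag forwarding above is just a change of offset convention. A small sketch of the arithmetic, assuming kHeapObjectTag is 1 as on these ports:

#include <cstdint>
#include <cassert>

const intptr_t kHeapObjectTag = 1;

// FieldMemOperand-style: |offset| is measured from the start of the object,
// so the tag has to be subtracted from the tagged pointer.
intptr_t FieldSlot(intptr_t tagged_ptr, int offset) {
  return tagged_ptr - kHeapObjectTag + offset;
}

// MemOperand-style: |offset| already has the tag pre-subtracted, so it is
// applied to the tagged pointer directly.
intptr_t RawSlot(intptr_t tagged_ptr, int offset) {
  return tagged_ptr + offset;
}

int main() {
  intptr_t object = 0x1000 + kHeapObjectTag;  // a tagged heap pointer
  // Same slot, expressed both ways; this is why RecordWriteContextSlot
  // forwards offset + kHeapObjectTag to RecordWriteField.
  assert(RawSlot(object, 8) == FieldSlot(object, 8 + kHeapObjectTag));
  return 0;
}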
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
// address registers are clobbered by the operation.
void RecordWrite(
Register object,
Register scratch1);
// For the page containing |object| mark the region covering
// [address] dirty. The object address must be in the first 8K of an
// allocated page. All 3 registers are clobbered by the operation,
// as well as the ip register. RecordWrite updates the write barrier
// even when storing smis.
void RecordWrite(Register object,
Register address,
Register value,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
Register scratch);
// Push a handle.
void Push(Handle<Object> handle);
@ -413,6 +318,16 @@ class MacroAssembler: public Assembler {
const double imm,
const Condition cond = al);
// ---------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0);
@ -654,13 +569,6 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
// Initialize fields with filler values. Fields starting at |start_offset|
// up to but not including |end_offset| are overwritten with the value in
// |filler|. At the end of the loop, |start_offset| takes the value of
// |end_offset|.
void InitializeFieldsWithFiller(Register start_offset,
Register end_offset,
Register filler);
// ---------------------------------------------------------------------------
// Support functions.
@ -700,31 +608,6 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,
Register scratch,
Label* fail);
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiOnlyElements(Register map,
Register scratch,
Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements, otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* fail);
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
@ -947,11 +830,11 @@ class MacroAssembler: public Assembler {
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
void CallCFunction(Register function, Register scratch, int num_arguments);
void CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments);
void CallCFunction(Register function,
void CallCFunction(Register function, Register scratch,
int num_reg_arguments,
int num_double_arguments);
@ -1019,9 +902,6 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
@ -1168,12 +1048,10 @@ class MacroAssembler: public Assembler {
void LoadInstanceDescriptors(Register map, Register descriptors);
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
private:
void CallCFunctionHelper(Register function,
ExternalReference function_reference,
Register scratch,
int num_reg_arguments,
int num_double_arguments);
@ -1189,25 +1067,16 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper,
CallKind call_kind);
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
void InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Register scratch,
Condition cond, // eq for new space, ne otherwise.
Label* branch);
// Helper for finding the mark bits for an address. Afterwards, the
// bitmap register points at the word with the mark bits and the mask
// register holds the position of the first mark bit. Leaves addr_reg unchanged.
inline void GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg);
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
@ -1215,7 +1084,6 @@ class MacroAssembler: public Assembler {
bool generating_stub_;
bool allow_stub_calls_;
bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;

9
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -371,12 +371,9 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Isolate.
__ mov(r3, Operand(ExternalReference::isolate_address()));
{
AllowExternalCallThatCantCauseGC scope(masm_);
ExternalReference function =
ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
__ CallCFunction(function, argument_count);
}
// Check if function returned non-zero for success or zero for failure.
__ cmp(r0, Operand(0, RelocInfo::NONE));
@ -614,12 +611,6 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Entry code:
__ bind(&entry_label_);
// Tell the system that we have a stack frame. Because the type is MANUAL, no
// code is generated.
FrameScope scope(masm_, StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
// Push arguments
// Save callee-save registers.
// Start new stack frame.

2
deps/v8/src/arm/simulator-arm.cc

@ -1618,8 +1618,6 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
// Catch null pointers a little earlier.
ASSERT(start_address > 8191 || start_address < 0);
int reg = 0;
while (rlist != 0) {
if ((rlist & 1) != 0) {

269
deps/v8/src/arm/stub-cache-arm.cc

@ -431,13 +431,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
__ mov(name_reg, r0);
__ RecordWriteField(receiver_reg,
offset,
name_reg,
scratch,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
__ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@ -450,13 +444,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
__ mov(name_reg, r0);
__ RecordWriteField(scratch,
offset,
name_reg,
receiver_reg,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
__ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
}
// Return the value (register r0).
@ -565,8 +553,7 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
}
static MaybeObject* GenerateFastApiDirectCall(
MacroAssembler* masm,
static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
const CallOptimization& optimization,
int argc) {
// ----------- S t a t e -------------
@ -604,8 +591,6 @@ static MaybeObject* GenerateFastApiDirectCall(
ApiFunction fun(api_function_address);
const int kApiStackSpace = 4;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// r0 = v8::Arguments&
@ -631,11 +616,9 @@ static MaybeObject* GenerateFastApiDirectCall(
ExternalReference ref = ExternalReference(&fun,
ExternalReference::DIRECT_API_CALL,
masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
class CallInterceptorCompiler BASE_EMBEDDED {
public:
CallInterceptorCompiler(StubCompiler* stub_compiler,
@ -811,7 +794,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
miss_label);
// Call a runtime function to load the interceptor property.
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
// Save the name_ register across the call.
__ push(name_);
@ -828,8 +811,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Restore the name_ register.
__ pop(name_);
// Leave the internal frame.
__ LeaveInternalFrame();
}
void LoadWithInterceptor(MacroAssembler* masm,
@ -838,8 +820,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
JSObject* holder_obj,
Register scratch,
Label* interceptor_succeeded) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ Push(holder, name_);
CompileCallLoadPropertyWithInterceptor(masm,
@ -850,7 +831,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ pop(name_); // Restore the name.
__ pop(receiver); // Restore the holder.
}
__ LeaveInternalFrame();
// If interceptor returns no-result sentinel, call the constant function.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
@ -1247,10 +1228,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
ApiFunction fun(getter_address);
const int kApiStackSpace = 1;
FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
// Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object **args_) as the data.
__ str(scratch2, MemOperand(sp, 1 * kPointerSize));
@ -1310,8 +1288,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
{
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
__ EnterInternalFrame();
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
// CALLBACKS case needs a receiver to be passed into C++ callback.
@ -1335,7 +1312,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
__ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
__ cmp(r0, scratch1);
__ b(eq, &interceptor_failed);
frame_scope.GenerateLeaveFrame();
__ LeaveInternalFrame();
__ Ret();
__ bind(&interceptor_failed);
@ -1345,8 +1322,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
__ pop(receiver);
}
// Leave the internal frame.
}
__ LeaveInternalFrame();
// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register.
@ -1580,7 +1556,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
DONT_DO_SMI_CHECK);
if (argc == 1) { // Otherwise fall through to call the builtin.
Label attempt_to_grow_elements;
Label exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@ -1595,15 +1571,11 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ cmp(r0, r4);
__ b(gt, &attempt_to_grow_elements);
// Check if value is a smi.
Label with_write_barrier;
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(r4, &with_write_barrier);
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Push the element.
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
@ -1613,31 +1585,14 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
// Check for a smi.
__ JumpIfNotSmi(r4, &with_write_barrier);
__ bind(&exit);
__ Drop(argc + 1);
__ Ret();
__ bind(&with_write_barrier);
__ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ CheckFastSmiOnlyElements(r6, r6, &call_builtin);
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Push the element.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
__ RecordWrite(elements,
end_elements,
r4,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ InNewSpace(elements, r4, eq, &exit);
__ RecordWriteHelper(elements, end_elements, r4);
__ Drop(argc + 1);
__ Ret();
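The end_elements computation above converts the smi-tagged array length straight into a byte offset with a single shift. A sketch of that arithmetic, assuming 32-bit smis with a 1-bit tag and 4-byte pointers:

#include <cstdint>
#include <cassert>

const int kSmiTagSize      = 1;
const int kPointerSizeLog2 = 2;

// elements + (length_smi << (kPointerSizeLog2 - kSmiTagSize)) points just past
// the last element: length_smi is already length << 1, and each element is
// 4 bytes, so shifting left by one more gives length * 4.
uintptr_t EndOfElements(uintptr_t elements, uint32_t length_smi) {
  return elements +
         (static_cast<uintptr_t>(length_smi) << (kPointerSizeLog2 - kSmiTagSize));
}

int main() {
  uint32_t length_smi = 3u << kSmiTagSize;  // JS length 3
  assert(EndOfElements(0x1000, length_smi) == 0x1000 + 3 * 4);
  return 0;
}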
@ -1649,15 +1604,6 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ b(&call_builtin);
}
__ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
// Growing elements that are SMI-only requires special handling in case
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(r2, &no_fast_elements_check);
__ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ CheckFastObjectElements(r7, r7, &call_builtin);
__ bind(&no_fast_elements_check);
Isolate* isolate = masm()->isolate();
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate);
@ -1684,7 +1630,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Update new_space_allocation_top.
__ str(r6, MemOperand(r7));
// Push the argument.
__ str(r2, MemOperand(end_elements));
__ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize));
__ str(r6, MemOperand(end_elements));
// Fill the rest with holes.
__ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) {
@ -2766,15 +2713,6 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Store the value in the cell.
__ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
__ mov(r1, r0);
__ RecordWriteField(r4,
JSGlobalPropertyCell::kValueOffset,
r1,
r2,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET);
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
__ Ret();
@ -3178,7 +3116,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
}
MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic(
MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
MapList* receiver_maps,
CodeList* handler_ics) {
// ----------- S t a t e -------------
@ -3274,10 +3212,9 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
}
MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
MapList* receiver_maps,
CodeList* handler_stubs,
MapList* transitioned_maps) {
CodeList* handler_ics) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@ -3290,20 +3227,12 @@ MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
int receiver_count = receiver_maps->length();
__ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
for (int i = 0; i < receiver_count; ++i) {
Handle<Map> map(receiver_maps->at(i));
Handle<Code> code(handler_stubs->at(i));
for (int current = 0; current < receiver_count; ++current) {
Handle<Map> map(receiver_maps->at(current));
Handle<Code> code(handler_ics->at(current));
__ mov(ip, Operand(map));
__ cmp(r3, ip);
if (transitioned_maps->at(i) == NULL) {
__ Jump(code, RelocInfo::CODE_TARGET, eq);
} else {
Label next_map;
__ b(eq, &next_map);
__ mov(r4, Operand(Handle<Map>(transitioned_maps->at(i))));
__ Jump(code, RelocInfo::CODE_TARGET, al);
__ bind(&next_map);
}
}
__ bind(&miss);
@ -3525,7 +3454,6 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -3612,7 +3540,6 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -3953,7 +3880,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -4017,7 +3943,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -4157,7 +4082,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -4310,10 +4234,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
}
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
ElementsKind elements_kind) {
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@ -4322,7 +4244,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// -- r3 : scratch
// -- r4 : scratch (elements)
// -----------------------------------
Label miss_force_generic, transition_elements_kind;
Label miss_force_generic;
Register value_reg = r0;
Register key_reg = r1;
@ -4355,33 +4277,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ cmp(key_reg, scratch);
__ b(hs, &miss_force_generic);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ add(scratch,
scratch,
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value_reg, MemOperand(scratch));
} else {
ASSERT(elements_kind == FAST_ELEMENTS);
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ add(scratch,
scratch,
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value_reg, MemOperand(scratch));
__ mov(receiver_reg, value_reg);
__ RecordWrite(elements_reg, // Object.
scratch, // Address.
receiver_reg, // Value.
kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
__ str(value_reg,
MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ RecordWrite(scratch,
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
receiver_reg , elements_reg);
// value_reg (r0) is preserved.
// Done.
__ Ret();
@ -4390,10 +4294,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic_miss, RelocInfo::CODE_TARGET);
}
@ -4409,15 +4309,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- r4 : scratch
// -- r5 : scratch
// -----------------------------------
Label miss_force_generic, transition_elements_kind;
Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
Register value_reg = r0;
Register key_reg = r1;
Register receiver_reg = r2;
Register elements_reg = r3;
Register scratch1 = r4;
Register scratch2 = r5;
Register scratch3 = r6;
Register scratch = r3;
Register elements_reg = r4;
Register mantissa_reg = r5;
Register exponent_reg = r6;
Register scratch4 = r7;
// This stub is meant to be tail-jumped to, the receiver must already
@ -4429,25 +4329,90 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Check that the key is within bounds.
if (is_js_array) {
__ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else {
__ ldr(scratch1,
__ ldr(scratch,
FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
}
// Compare smis, unsigned compare catches both negative and out-of-bound
// indexes.
__ cmp(key_reg, scratch1);
__ cmp(key_reg, scratch);
__ b(hs, &miss_force_generic);
__ StoreNumberToDoubleElements(value_reg,
key_reg,
receiver_reg,
elements_reg,
scratch1,
scratch2,
scratch3,
// Handle smi values specially.
__ JumpIfSmi(value_reg, &smi_value);
// Ensure that the object is a heap number
__ CheckMap(value_reg,
scratch,
masm->isolate()->factory()->heap_number_map(),
&miss_force_generic,
DONT_DO_SMI_CHECK);
// Check for NaN: all NaN values have an upper word greater than or equal to
// 0x7ff00000 (signed compare), i.e. all exponent bits set.
__ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
__ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
__ cmp(exponent_reg, scratch);
__ b(ge, &maybe_nan);
__ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
__ bind(&have_double_value);
__ add(scratch, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
__ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
__ str(exponent_reg, FieldMemOperand(scratch, offset));
__ Ret();
__ bind(&maybe_nan);
// Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
// it's an Infinity, and the non-NaN code path applies.
__ b(gt, &is_nan);
__ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
__ cmp(mantissa_reg, Operand(0));
__ b(eq, &have_double_value);
__ bind(&is_nan);
// Load canonical NaN for storing into the double array.
uint64_t nan_int64 = BitCast<uint64_t>(
FixedDoubleArray::canonical_not_the_hole_nan_as_double());
__ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
__ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
__ jmp(&have_double_value);
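A standalone sketch of the double-word tests this path performs: split the double into its 32-bit halves, classify NaN versus Infinity from the upper half, and substitute one canonical quiet NaN before storing. The canonical bit pattern below is an assumption for illustration, not necessarily what canonical_not_the_hole_nan_as_double() returns.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <cmath>
#include <limits>

void SplitDouble(double v, uint32_t* lower, uint32_t* upper) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  *lower = static_cast<uint32_t>(bits);
  *upper = static_cast<uint32_t>(bits >> 32);
}

bool IsNaNBits(uint32_t lower, uint32_t upper) {
  // Exponent all ones and a non-zero fraction means NaN; a zero fraction
  // means +/-Infinity.
  return (upper & 0x7FF00000u) == 0x7FF00000u &&
         (((upper & 0x000FFFFFu) | lower) != 0);
}

int main() {
  uint32_t lo, hi;
  SplitDouble(std::nan(""), &lo, &hi);
  assert(IsNaNBits(lo, hi));
  SplitDouble(std::numeric_limits<double>::infinity(), &lo, &hi);
  assert(!IsNaNBits(lo, hi));
  const uint64_t kCanonicalNaN = 0x7FF8000000000000ull;  // assumed pattern
  double canonical;
  std::memcpy(&canonical, &kCanonicalNaN, sizeof(canonical));
  assert(canonical != canonical);  // still a NaN
  return 0;
}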
__ bind(&smi_value);
__ add(scratch, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ add(scratch, scratch,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
// scratch now holds the effective address of the double element
FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(VFP3)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
Register untagged_value = receiver_reg;
__ SmiUntag(untagged_value, value_reg);
FloatingPointHelper::ConvertIntToDouble(
masm,
untagged_value,
destination,
d0,
mantissa_reg,
exponent_reg,
scratch4,
&transition_elements_kind);
s2);
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
__ vstr(d0, scratch, 0);
} else {
__ str(mantissa_reg, MemOperand(scratch, 0));
__ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
}
__ Ret();
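What the smi path produces, restated in plain C++ as a stand-in for FloatingPointHelper::ConvertIntToDouble (assuming little-endian word order as on ARM): the untagged integer becomes an IEEE-754 double whose two 32-bit words are stored into the element.

#include <cstdint>
#include <cstdio>
#include <cstring>

void IntToDoubleWords(int32_t value, uint32_t* mantissa_word,
                      uint32_t* exponent_word) {
  double d = static_cast<double>(value);
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  *mantissa_word = static_cast<uint32_t>(bits);        // low 32 bits
  *exponent_word = static_cast<uint32_t>(bits >> 32);  // sign + exponent + high fraction
}

int main() {
  uint32_t lo, hi;
  IntToDoubleWords(7, &lo, &hi);
  std::printf("7 -> %08x %08x\n", hi, lo);  // prints "7 -> 401c0000 00000000"
  return 0;
}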
// Handle store cache miss, replacing the ic with the generic stub.
@ -4455,10 +4420,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Handle<Code> ic =
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ Jump(ic, RelocInfo::CODE_TARGET);
__ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic_miss, RelocInfo::CODE_TARGET);
}

151
deps/v8/src/array.js

@ -201,14 +201,17 @@ function ConvertToString(x) {
function ConvertToLocaleString(e) {
if (IS_NULL_OR_UNDEFINED(e)) {
if (e == null) {
return '';
} else {
// According to ES5, section 15.4.4.3, the toLocaleString conversion
// must throw a TypeError if ToObject(e).toLocaleString isn't
// callable.
// e_obj's toLocaleString might be overwritten, check if it is a function.
// Call ToString if toLocaleString is not a function.
// See issue 877615.
var e_obj = ToObject(e);
return %ToString(e_obj.toLocaleString());
if (IS_SPEC_FUNCTION(e_obj.toLocaleString))
return ToString(e_obj.toLocaleString());
else
return ToString(e);
}
}
@ -378,31 +381,18 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
function ArrayToString() {
var array;
var func;
if (IS_ARRAY(this)) {
func = this.join;
if (func === ArrayJoin) {
return Join(this, this.length, ',', ConvertToString);
}
array = this;
} else {
array = ToObject(this);
func = array.join;
}
if (!IS_SPEC_FUNCTION(func)) {
return %_CallFunction(array, ObjectToString);
if (!IS_ARRAY(this)) {
throw new $TypeError('Array.prototype.toString is not generic');
}
return %_CallFunction(array, func);
return Join(this, this.length, ',', ConvertToString);
}
function ArrayToLocaleString() {
var array = ToObject(this);
var arrayLen = array.length;
var len = TO_UINT32(arrayLen);
if (len === 0) return "";
return Join(array, len, ',', ConvertToLocaleString);
if (!IS_ARRAY(this)) {
throw new $TypeError('Array.prototype.toString is not generic');
}
return Join(this, this.length, ',', ConvertToLocaleString);
}
@ -1003,24 +993,21 @@ function ArrayFilter(f, receiver) {
["Array.prototype.filter"]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = ToUint32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = ToUint32(this.length);
var result = [];
var result_length = 0;
for (var i = 0; i < length; i++) {
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
if (%_CallFunction(receiver, current, i, array, f)) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
if (%_CallFunction(receiver, current, i, this, f)) {
result[result_length++] = current;
}
}
@ -1035,22 +1022,19 @@ function ArrayForEach(f, receiver) {
["Array.prototype.forEach"]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
%_CallFunction(receiver, current, i, array, f);
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
%_CallFunction(receiver, current, i, this, f);
}
}
}
@ -1064,22 +1048,19 @@ function ArraySome(f, receiver) {
["Array.prototype.some"]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
if (%_CallFunction(receiver, current, i, array, f)) return true;
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
if (%_CallFunction(receiver, current, i, this, f)) return true;
}
}
return false;
@ -1092,22 +1073,19 @@ function ArrayEvery(f, receiver) {
["Array.prototype.every"]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
if (!%_CallFunction(receiver, current, i, array, f)) return false;
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
if (!%_CallFunction(receiver, current, i, this, f)) return false;
}
}
return true;
@ -1119,24 +1097,21 @@ function ArrayMap(f, receiver) {
["Array.prototype.map"]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = TO_UINT32(this.length);
var result = new $Array();
var accumulator = new InternalArray(length);
for (var i = 0; i < length; i++) {
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
accumulator[i] = %_CallFunction(receiver, current, i, array, f);
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
accumulator[i] = %_CallFunction(receiver, current, i, this, f);
}
}
%MoveArrayContents(accumulator, result);
@ -1270,20 +1245,19 @@ function ArrayReduce(callback, current) {
["Array.prototype.reduce"]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = ToUint32(array.length);
if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = ToUint32(this.length);
var i = 0;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i < length; i++) {
current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
i++;
break find_initial;
}
@ -1293,9 +1267,9 @@ function ArrayReduce(callback, current) {
var receiver = %GetDefaultReceiver(callback);
for (; i < length; i++) {
var element = array[i];
if (!IS_UNDEFINED(element) || i in array) {
current = %_CallFunction(receiver, current, element, i, array, callback);
var element = this[i];
if (!IS_UNDEFINED(element) || i in this) {
current = %_CallFunction(receiver, current, element, i, this, callback);
}
}
return current;
@ -1307,20 +1281,15 @@ function ArrayReduceRight(callback, current) {
["Array.prototype.reduceRight"]);
}
// Pull out the length so that side effects are visible before the
// callback function is checked.
var array = ToObject(this);
var length = ToUint32(array.length);
if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
var i = ToUint32(this.length) - 1;
var i = length - 1;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i >= 0; i--) {
current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
i--;
break find_initial;
}
@ -1330,9 +1299,9 @@ function ArrayReduceRight(callback, current) {
var receiver = %GetDefaultReceiver(callback);
for (; i >= 0; i--) {
var element = array[i];
if (!IS_UNDEFINED(element) || i in array) {
current = %_CallFunction(receiver, current, element, i, array, callback);
var element = this[i];
if (!IS_UNDEFINED(element) || i in this) {
current = %_CallFunction(receiver, current, element, i, this, callback);
}
}
return current;

56
deps/v8/src/assembler.cc

@ -38,7 +38,6 @@
#include "deoptimizer.h"
#include "execution.h"
#include "ic-inl.h"
#include "incremental-marking.h"
#include "factory.h"
#include "runtime.h"
#include "runtime-profiler.h"
@ -48,7 +47,6 @@
#include "ast.h"
#include "regexp-macro-assembler.h"
#include "platform.h"
#include "store-buffer.h"
// Include native regexp-macro-assembler.
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
@ -518,7 +516,6 @@ void RelocIterator::next() {
RelocIterator::RelocIterator(Code* code, int mode_mask) {
rinfo_.host_ = code;
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
// Relocation info is read backwards.
@ -739,38 +736,9 @@ ExternalReference::ExternalReference(const SCTableReference& table_ref)
: address_(table_ref.address()) {}
ExternalReference ExternalReference::
incremental_marking_record_write_function(Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
}
ExternalReference ExternalReference::
incremental_evacuation_record_write_function(Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
}
ExternalReference ExternalReference::
store_buffer_overflow_function(Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
}
ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
}
ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
return
ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC)));
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(Runtime::PerformGC)));
}
@ -834,6 +802,17 @@ ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
}
ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) {
return ExternalReference(isolate->factory()->the_hole_value().location());
}
ExternalReference ExternalReference::arguments_marker_location(
Isolate* isolate) {
return ExternalReference(isolate->factory()->arguments_marker().location());
}
ExternalReference ExternalReference::roots_address(Isolate* isolate) {
return ExternalReference(isolate->heap()->roots_address());
}
@ -861,14 +840,9 @@ ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
}
ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
return ExternalReference(isolate->heap()->store_buffer()->TopAddress());
}
ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
return ExternalReference(reinterpret_cast<Address>(
isolate->heap()->NewSpaceMask()));
Address mask = reinterpret_cast<Address>(isolate->heap()->NewSpaceMask());
return ExternalReference(mask);
}
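Generated code pairs new_space_mask with new_space_start to test space membership: masking an address must yield the space's start. A sketch with illustrative values (the real base and size come from the heap at runtime):

#include <cstdint>
#include <cassert>

bool InNewSpace(uintptr_t addr, uintptr_t new_space_start,
                uintptr_t new_space_mask) {
  return (addr & new_space_mask) == new_space_start;
}

int main() {
  const uintptr_t start = 0x20000000;                        // assumed base
  const uintptr_t mask  = ~static_cast<uintptr_t>(0x1FFFFF); // assumed 2 MB pair
  assert(InNewSpace(start + 0x1234, start, mask));
  assert(!InNewSpace(0x30001234, start, mask));
  return 0;
}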

30
deps/v8/src/assembler.h

@ -143,9 +143,6 @@ class Label BASE_EMBEDDED {
};
enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
// -----------------------------------------------------------------------------
// Relocation information
@ -219,9 +216,8 @@ class RelocInfo BASE_EMBEDDED {
RelocInfo() {}
RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
: pc_(pc), rmode_(rmode), data_(data), host_(host) {
RelocInfo(byte* pc, Mode rmode, intptr_t data)
: pc_(pc), rmode_(rmode), data_(data) {
}
static inline bool IsConstructCall(Mode mode) {
@ -230,9 +226,6 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsCodeTarget(Mode mode) {
return mode <= LAST_CODE_ENUM;
}
static inline bool IsEmbeddedObject(Mode mode) {
return mode == EMBEDDED_OBJECT;
}
// Is the relocation mode affected by GC?
static inline bool IsGCRelocMode(Mode mode) {
return mode <= LAST_GCED_ENUM;
@ -265,7 +258,6 @@ class RelocInfo BASE_EMBEDDED {
void set_pc(byte* pc) { pc_ = pc; }
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
Code* host() const { return host_; }
// Apply a relocation by delta bytes
INLINE(void apply(intptr_t delta));
@ -361,7 +353,6 @@ class RelocInfo BASE_EMBEDDED {
byte* pc_;
Mode rmode_;
intptr_t data_;
Code* host_;
#ifdef V8_TARGET_ARCH_MIPS
// Code and Embedded Object pointers in mips are stored split
// across two consecutive 32-bit instructions. Heap management
@ -570,13 +561,6 @@ class ExternalReference BASE_EMBEDDED {
// pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually.
static ExternalReference incremental_marking_record_write_function(
Isolate* isolate);
static ExternalReference incremental_evacuation_record_write_function(
Isolate* isolate);
static ExternalReference store_buffer_overflow_function(
Isolate* isolate);
static ExternalReference flush_icache_function(Isolate* isolate);
static ExternalReference perform_gc_function(Isolate* isolate);
static ExternalReference fill_heap_number_with_random_function(
Isolate* isolate);
@ -593,6 +577,12 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
// Static variable Factory::the_hole_value.location()
static ExternalReference the_hole_value_location(Isolate* isolate);
// Static variable Factory::arguments_marker.location()
static ExternalReference arguments_marker_location(Isolate* isolate);
// Static variable Heap::roots_address()
static ExternalReference roots_address(Isolate* isolate);
@ -616,10 +606,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_space_start(Isolate* isolate);
static ExternalReference new_space_mask(Isolate* isolate);
static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
static ExternalReference new_space_mark_bits(Isolate* isolate);
// Write barrier.
static ExternalReference store_buffer_top(Isolate* isolate);
// Used for fast allocation in generated code.
static ExternalReference new_space_allocation_top_address(Isolate* isolate);

106
deps/v8/src/ast.cc

@ -327,77 +327,56 @@ bool BinaryOperation::ResultOverwriteAllowed() {
}
static bool IsTypeof(Expression* expr) {
UnaryOperation* maybe_unary = expr->AsUnaryOperation();
return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
}
// Check for the pattern: typeof <expression> equals <string literal>.
static bool MatchLiteralCompareTypeof(Expression* left,
Token::Value op,
Expression* right,
Expression** expr,
bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
Handle<String>* check) {
if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
*expr = left->AsUnaryOperation()->expression();
*check = Handle<String>::cast(right->AsLiteral()->handle());
if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false;
UnaryOperation* left_unary = left_->AsUnaryOperation();
UnaryOperation* right_unary = right_->AsUnaryOperation();
Literal* left_literal = left_->AsLiteral();
Literal* right_literal = right_->AsLiteral();
// Check for the pattern: typeof <expression> == <string literal>.
if (left_unary != NULL && left_unary->op() == Token::TYPEOF &&
right_literal != NULL && right_literal->handle()->IsString()) {
*expr = left_unary->expression();
*check = Handle<String>::cast(right_literal->handle());
return true;
}
return false;
}
bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
Handle<String>* check) {
return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) ||
MatchLiteralCompareTypeof(right_, op_, left_, expr, check);
}
static bool IsVoidOfLiteral(Expression* expr) {
UnaryOperation* maybe_unary = expr->AsUnaryOperation();
return maybe_unary != NULL &&
maybe_unary->op() == Token::VOID &&
maybe_unary->expression()->AsLiteral() != NULL;
}
// Check for the pattern: void <literal> equals <expression>
static bool MatchLiteralCompareUndefined(Expression* left,
Token::Value op,
Expression* right,
Expression** expr) {
if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
*expr = right;
// Check for the pattern: <string literal> == typeof <expression>.
if (right_unary != NULL && right_unary->op() == Token::TYPEOF &&
left_literal != NULL && left_literal->handle()->IsString()) {
*expr = right_unary->expression();
*check = Handle<String>::cast(left_literal->handle());
return true;
}
return false;
}
bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
MatchLiteralCompareUndefined(right_, op_, left_, expr);
}
if (op_ != Token::EQ_STRICT) return false;
UnaryOperation* left_unary = left_->AsUnaryOperation();
UnaryOperation* right_unary = right_->AsUnaryOperation();
// Check for the pattern: null equals <expression>
static bool MatchLiteralCompareNull(Expression* left,
Token::Value op,
Expression* right,
Expression** expr) {
if (left->IsNullLiteral() && Token::IsEqualityOp(op)) {
*expr = right;
// Check for the pattern: <expression> === void <literal>.
if (right_unary != NULL && right_unary->op() == Token::VOID &&
right_unary->expression()->AsLiteral() != NULL) {
*expr = left_;
return true;
}
return false;
}
// Check for the pattern: void <literal> === <expression>.
if (left_unary != NULL && left_unary->op() == Token::VOID &&
left_unary->expression()->AsLiteral() != NULL) {
*expr = right_;
return true;
}
bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
return MatchLiteralCompareNull(left_, op_, right_, expr) ||
MatchLiteralCompareNull(right_, op_, left_, expr);
return false;
}
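The pattern being matched here, modelled with hypothetical stand-in types rather than V8's AST classes: typeof <expression> compared against a string literal, accepted in either operand order.

#include <string>
#include <cassert>

struct Expr {
  bool is_typeof;            // unary typeof node
  Expr* operand;             // operand of the typeof
  bool is_string_literal;
  std::string literal;
};

bool MatchLiteralCompareTypeof(Expr* left, Expr* right,
                               Expr** expr, std::string* check) {
  if (left->is_typeof && right->is_string_literal) {
    *expr = left->operand;
    *check = right->literal;
    return true;
  }
  return false;
}

bool IsLiteralCompareTypeof(Expr* left, Expr* right,
                            Expr** expr, std::string* check) {
  // Try both orders, mirroring the two explicit checks in the code above.
  return MatchLiteralCompareTypeof(left, right, expr, check) ||
         MatchLiteralCompareTypeof(right, left, expr, check);
}

int main() {
  Expr inner = {false, nullptr, false, ""};
  Expr type_of = {true, &inner, false, ""};
  Expr literal = {false, nullptr, true, "number"};
  Expr* matched; std::string check;
  assert(IsLiteralCompareTypeof(&literal, &type_of, &matched, &check));
  assert(matched == &inner && check == "number");
  return 0;
}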
@ -550,9 +529,7 @@ bool Conditional::IsInlineable() const {
bool VariableProxy::IsInlineable() const {
return var()->IsUnallocated()
|| var()->IsStackAllocated()
|| var()->IsContextSlot();
return var()->IsUnallocated() || var()->IsStackAllocated();
}
@ -621,6 +598,11 @@ bool CompareOperation::IsInlineable() const {
}
bool CompareToNull::IsInlineable() const {
return expression()->IsInlineable();
}
bool CountOperation::IsInlineable() const {
return expression()->IsInlineable();
}
@ -764,13 +746,9 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
CallKind call_kind) {
is_monomorphic_ = oracle->CallIsMonomorphic(this);
Property* property = expression()->AsProperty();
if (property == NULL) {
// Function call. Specialize for monomorphic calls.
if (is_monomorphic_) target_ = oracle->GetCallTarget(this);
} else {
// Method call. Specialize for the receiver types seen at runtime.
ASSERT(property != NULL);
// Specialize for the receiver types seen at runtime.
Literal* key = property->key()->AsLiteral();
ASSERT(key != NULL && key->handle()->IsString());
Handle<String> name = Handle<String>::cast(key->handle());
@ -785,6 +763,7 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
}
}
#endif
is_monomorphic_ = oracle->CallIsMonomorphic(this);
check_type_ = oracle->GetCallCheckType(this);
if (is_monomorphic_) {
Handle<Map> map;
@ -799,7 +778,6 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
}
is_monomorphic_ = ComputeTarget(map, name);
}
}
}

41
deps/v8/src/ast.h

@ -90,6 +90,7 @@ namespace internal {
V(CountOperation) \
V(BinaryOperation) \
V(CompareOperation) \
V(CompareToNull) \
V(ThisFunction)
#define AST_NODE_LIST(V) \
@ -288,12 +289,6 @@ class Expression: public AstNode {
// True iff the expression is a literal represented as a smi.
virtual bool IsSmiLiteral() { return false; }
// True iff the expression is a string literal.
virtual bool IsStringLiteral() { return false; }
// True iff the expression is the null literal.
virtual bool IsNullLiteral() { return false; }
// Type feedback information for assignments and properties.
virtual bool IsMonomorphic() {
UNREACHABLE();
@ -398,29 +393,31 @@ class Block: public BreakableStatement {
class Declaration: public AstNode {
public:
Declaration(VariableProxy* proxy,
VariableMode mode,
Variable::Mode mode,
FunctionLiteral* fun,
Scope* scope)
: proxy_(proxy),
mode_(mode),
fun_(fun),
scope_(scope) {
ASSERT(mode == VAR || mode == CONST || mode == LET);
ASSERT(mode == Variable::VAR ||
mode == Variable::CONST ||
mode == Variable::LET);
// At the moment there are no "const functions" in JavaScript...
ASSERT(fun == NULL || mode == VAR || mode == LET);
ASSERT(fun == NULL || mode == Variable::VAR || mode == Variable::LET);
}
DECLARE_NODE_TYPE(Declaration)
VariableProxy* proxy() const { return proxy_; }
VariableMode mode() const { return mode_; }
Variable::Mode mode() const { return mode_; }
FunctionLiteral* fun() const { return fun_; } // may be NULL
virtual bool IsInlineable() const;
Scope* scope() const { return scope_; }
private:
VariableProxy* proxy_;
VariableMode mode_;
Variable::Mode mode_;
FunctionLiteral* fun_;
// Nested scope from which the declaration originated.
@ -894,8 +891,6 @@ class Literal: public Expression {
virtual bool IsTrivial() { return true; }
virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
virtual bool IsStringLiteral() { return handle_->IsString(); }
virtual bool IsNullLiteral() { return handle_->IsNull(); }
// Check if this literal is identical to the other literal.
bool IsIdenticalTo(const Literal* other) const {
@ -1470,7 +1465,6 @@ class CompareOperation: public Expression {
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
bool IsLiteralCompareUndefined(Expression** expr);
bool IsLiteralCompareNull(Expression** expr);
private:
Token::Value op_;
@ -1483,6 +1477,25 @@ class CompareOperation: public Expression {
};
class CompareToNull: public Expression {
public:
CompareToNull(Isolate* isolate, bool is_strict, Expression* expression)
: Expression(isolate), is_strict_(is_strict), expression_(expression) { }
DECLARE_NODE_TYPE(CompareToNull)
virtual bool IsInlineable() const;
bool is_strict() const { return is_strict_; }
Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
Expression* expression() const { return expression_; }
private:
bool is_strict_;
Expression* expression_;
};
class Conditional: public Expression {
public:
Conditional(Isolate* isolate,

47
deps/v8/src/bootstrapper.cc

@ -34,7 +34,6 @@
#include "debug.h"
#include "execution.h"
#include "global-handles.h"
#include "isolate-inl.h"
#include "macro-assembler.h"
#include "natives.h"
#include "objects-visiting.h"
@ -996,26 +995,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
initial_map->instance_size() + 5 * kPointerSize);
initial_map->set_instance_descriptors(*descriptors);
initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
// RegExp prototype object is itself a RegExp.
Handle<Map> proto_map = factory->CopyMapDropTransitions(initial_map);
proto_map->set_prototype(global_context()->initial_object_prototype());
Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
heap->empty_string());
proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
heap->false_value());
proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
heap->false_value());
proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex,
heap->false_value());
proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
Smi::FromInt(0),
SKIP_WRITE_BARRIER); // It's a Smi.
initial_map->set_prototype(*proto);
factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
JSRegExp::IRREGEXP, factory->empty_string(),
JSRegExp::Flags(0), 0);
}
{ // -- J S O N
@ -1097,11 +1076,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
elements->set(0, *array);
array = factory->NewFixedArray(0);
elements->set(1, *array);
Handle<Map> non_strict_arguments_elements_map =
factory->GetElementsTransitionMap(result,
NON_STRICT_ARGUMENTS_ELEMENTS);
result->set_map(*non_strict_arguments_elements_map);
ASSERT(result->HasNonStrictArgumentsElements());
result->set_elements(*elements);
global_context()->set_aliased_arguments_boilerplate(*result);
}
@ -1353,8 +1327,6 @@ void Genesis::InstallNativeFunctions() {
configure_instance_fun);
INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
INSTALL_NATIVE(JSObject, "functionCache", function_cache);
INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
to_complete_property_descriptor);
}
void Genesis::InstallExperimentalNativeFunctions() {
@ -1583,18 +1555,6 @@ bool Genesis::InstallNatives() {
isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
array_function->shared()->DontAdaptArguments();
// InternalArrays should not use Smi-Only array optimizations. There are too
// many places in the C++ runtime code (e.g. RegEx) that assume that
// elements in InternalArrays can be set to non-Smi values without going
// through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
// transition easy to trap. Moreover, they rarely are smi-only.
MaybeObject* maybe_map =
array_function->initial_map()->CopyDropTransitions();
Map* new_map;
if (!maybe_map->To<Map>(&new_map)) return maybe_map;
new_map->set_elements_kind(FAST_ELEMENTS);
array_function->set_initial_map(new_map);
// Make "length" magic on instances.
Handle<DescriptorArray> array_descriptors =
factory()->CopyAppendForeignDescriptor(
@ -1978,10 +1938,9 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
if (!InstallExtension(extension->dependencies()[i])) return false;
}
Isolate* isolate = Isolate::Current();
Handle<String> source_code =
isolate->factory()->NewExternalStringFromAscii(extension->source());
bool result = CompileScriptCached(
CStrVector(extension->name()),
Vector<const char> source = CStrVector(extension->source());
Handle<String> source_code = isolate->factory()->NewStringFromAscii(source);
bool result = CompileScriptCached(CStrVector(extension->name()),
source_code,
isolate->bootstrapper()->extensions_cache(),
extension,

141
deps/v8/src/builtins.cc

@ -33,7 +33,6 @@
#include "builtins.h"
#include "gdb-jit.h"
#include "ic-inl.h"
#include "mark-compact.h"
#include "vm-state-inl.h"
namespace v8 {
@ -203,7 +202,7 @@ BUILTIN(ArrayCodeGeneric) {
}
// 'array' now contains the JSArray we should initialize.
ASSERT(array->HasFastTypeElements());
ASSERT(array->HasFastElements());
// Optimize the case where there is one argument and the argument is a
// small smi.
@ -216,8 +215,7 @@ BUILTIN(ArrayCodeGeneric) {
{ MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
MaybeObject* maybe_obj = array->SetContent(FixedArray::cast(obj));
if (maybe_obj->IsFailure()) return maybe_obj;
array->SetContent(FixedArray::cast(obj));
return array;
}
}
@ -241,11 +239,6 @@ BUILTIN(ArrayCodeGeneric) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
// Set length and elements on the array.
MaybeObject* maybe_object =
array->EnsureCanContainElements(FixedArray::cast(obj));
if (maybe_object->IsFailure()) return maybe_object;
AssertNoAllocation no_gc;
FixedArray* elms = FixedArray::cast(obj);
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@ -254,6 +247,7 @@ BUILTIN(ArrayCodeGeneric) {
elms->set(index, args[index+1], mode);
}
// Set length and elements on the array.
array->set_elements(FixedArray::cast(obj));
array->set_length(len);
@ -301,7 +295,6 @@ static void CopyElements(Heap* heap,
if (mode == UPDATE_WRITE_BARRIER) {
heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
}
heap->incremental_marking()->RecordWrites(dst);
}
@ -320,7 +313,6 @@ static void MoveElements(Heap* heap,
if (mode == UPDATE_WRITE_BARRIER) {
heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
}
heap->incremental_marking()->RecordWrites(dst);
}
@ -366,14 +358,6 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
former_start[to_trim] = heap->fixed_array_map();
former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
// Maintain marking consistency for HeapObjectIterator and
// IncrementalMarking.
int size_delta = to_trim * kPointerSize;
if (heap->marking()->TransferMark(elms->address(),
elms->address() + size_delta)) {
MemoryChunk::IncrementLiveBytes(elms->address(), -size_delta);
}
return FixedArray::cast(HeapObject::FromAddress(
elms->address() + to_trim * kPointerSize));
}
@ -400,42 +384,20 @@ static bool ArrayPrototypeHasNoElements(Heap* heap,
MUST_USE_RESULT
static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
Heap* heap, Object* receiver, Arguments* args, int first_added_arg) {
Heap* heap, Object* receiver) {
if (!receiver->IsJSArray()) return NULL;
JSArray* array = JSArray::cast(receiver);
HeapObject* elms = array->elements();
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (args == NULL || !array->HasFastSmiOnlyElements()) {
return elms;
}
} else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
if (args == NULL || !array->HasFastSmiOnlyElements() ||
maybe_writable_result->IsFailure()) {
return maybe_writable_result;
if (elms->map() == heap->fixed_array_map()) return elms;
if (elms->map() == heap->fixed_cow_array_map()) {
return array->EnsureWritableFastElements();
}
} else {
return NULL;
}
// Need to ensure that the arguments passed in args can be contained in
// the array.
int args_length = args->length();
if (first_added_arg >= args_length) return array->elements();
MaybeObject* maybe_array = array->EnsureCanContainElements(
args,
first_added_arg,
args_length - first_added_arg);
if (maybe_array->IsFailure()) return maybe_array;
return array->elements();
}
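The rewritten helper above keeps only the copy-on-write handling: a shared (COW) backing store has to be copied into a private, writable one before a builtin mutates it in place. A minimal standalone analogue of that idea, using std::shared_ptr instead of V8's fixed_cow_array_map check (hypothetical names, not V8 code):

// cow_elements_sketch.cc -- standalone analogue of the copy-on-write handling
// above, using std::shared_ptr instead of V8's map checks.
#include <cassert>
#include <memory>
#include <vector>

typedef std::vector<int> Elements;

// If the backing store is shared, replace it with a private copy first
// (the moral equivalent of EnsureWritableFastElements above).
static Elements* EnsureWritable(std::shared_ptr<Elements>* elms) {
  if (elms->use_count() > 1) {
    *elms = std::make_shared<Elements>(**elms);
  }
  return elms->get();
}

int main() {
  std::shared_ptr<Elements> a = std::make_shared<Elements>(3, 0);
  std::shared_ptr<Elements> b = a;   // b shares a's backing store

  EnsureWritable(&a)->at(0) = 42;    // a is copied before the in-place write
  assert((*a)[0] == 42);
  assert((*b)[0] == 0);              // the shared original is untouched
  return 0;
}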
static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
JSArray* receiver) {
if (!FLAG_clever_optimizations) return false;
Context* global_context = heap->isolate()->context()->global_context();
JSObject* array_proto =
JSObject::cast(global_context->array_function()->prototype());
@ -451,18 +413,20 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
HandleScope handleScope(isolate);
Handle<Object> js_builtin =
GetProperty(Handle<JSObject>(isolate->global_context()->builtins()),
GetProperty(Handle<JSObject>(
isolate->global_context()->builtins()),
name);
Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
int argc = args.length() - 1;
ScopedVector<Handle<Object> > argv(argc);
for (int i = 0; i < argc; ++i) {
argv[i] = args.at<Object>(i + 1);
ASSERT(js_builtin->IsJSFunction());
Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
ScopedVector<Object**> argv(args.length() - 1);
int n_args = args.length() - 1;
for (int i = 0; i < n_args; i++) {
argv[i] = args.at<Object>(i + 1).location();
}
bool pending_exception;
bool pending_exception = false;
Handle<Object> result = Execution::Call(function,
args.receiver(),
argc,
n_args,
argv.start(),
&pending_exception);
if (pending_exception) return Failure::Exception();
@ -475,7 +439,7 @@ BUILTIN(ArrayPush) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL) {
return CallJsBuiltin(isolate, "ArrayPush", args);
}
@ -511,6 +475,7 @@ BUILTIN(ArrayPush) {
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
}
// Add the provided values.
@ -520,10 +485,6 @@ BUILTIN(ArrayPush) {
elms->set(index + len, args[index + 1], mode);
}
if (elms != array->elements()) {
array->set_elements(elms);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
return Smi::FromInt(new_length);
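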
@ -535,7 +496,7 @@ BUILTIN(ArrayPop) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
@ -568,7 +529,7 @@ BUILTIN(ArrayShift) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayShift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@ -578,7 +539,7 @@ BUILTIN(ArrayShift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastTypeElements());
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
@ -590,7 +551,9 @@ BUILTIN(ArrayShift) {
}
if (!heap->lo_space()->Contains(elms)) {
array->set_elements(LeftTrimFixedArray(heap, elms, 1));
// As elms is still in the same space it used to be,
// there is no need to update the region dirty mark.
array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER);
} else {
// Shift the elements.
AssertNoAllocation no_gc;
@ -610,7 +573,7 @@ BUILTIN(ArrayUnshift) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayUnshift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@ -620,7 +583,7 @@ BUILTIN(ArrayUnshift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastTypeElements());
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@ -629,10 +592,6 @@ BUILTIN(ArrayUnshift) {
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
MaybeObject* maybe_object =
array->EnsureCanContainElements(&args, 1, to_add);
if (maybe_object->IsFailure()) return maybe_object;
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
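The growth policy visible here (and used by the other array builtins in this file) allocates roughly 1.5x the requested length plus a small constant, so repeated pushes amortize well. A standalone illustration of just that formula (not V8 code):

// capacity_sketch.cc -- standalone illustration of the backing-store growth
// formula above: new_length + new_length/2 + 16.
#include <cstddef>
#include <cstdio>

static int NewCapacity(int new_length) {
  return new_length + (new_length >> 1) + 16;
}

int main() {
  static const int lengths[] = {0, 10, 100, 1000, 10000};
  for (size_t i = 0; i < sizeof(lengths) / sizeof(lengths[0]); ++i) {
    std::printf("new_length=%5d -> capacity=%5d\n",
                lengths[i], NewCapacity(lengths[i]));
  }
  return 0;
}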
@ -641,11 +600,13 @@ BUILTIN(ArrayUnshift) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
if (len > 0) {
CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
}
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
} else {
@ -673,7 +634,7 @@ BUILTIN(ArraySlice) {
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
if (!array->HasFastTypeElements() ||
if (!array->HasFastElements() ||
!IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@ -689,7 +650,7 @@ BUILTIN(ArraySlice) {
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
&& JSObject::cast(receiver)->HasFastTypeElements();
&& JSObject::cast(receiver)->HasFastElements();
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@ -760,10 +721,6 @@ BUILTIN(ArraySlice) {
}
FixedArray* result_elms = FixedArray::cast(result);
MaybeObject* maybe_object =
result_array->EnsureCanContainElements(result_elms);
if (maybe_object->IsFailure()) return maybe_object;
AssertNoAllocation no_gc;
CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len);
@ -781,7 +738,7 @@ BUILTIN(ArraySplice) {
Object* receiver = *args.receiver();
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArraySplice", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@ -791,7 +748,7 @@ BUILTIN(ArraySplice) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastTypeElements());
ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value();
@ -868,9 +825,9 @@ BUILTIN(ArraySplice) {
}
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
int new_length = len - actual_delete_count + item_count;
bool elms_changed = false;
if (item_count < actual_delete_count) {
// Shrink the array.
const bool trim_array = !heap->lo_space()->Contains(elms) &&
@ -885,8 +842,7 @@ BUILTIN(ArraySplice) {
}
elms = LeftTrimFixedArray(heap, elms, delta);
elms_changed = true;
array->set_elements(elms, SKIP_WRITE_BARRIER);
} else {
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc,
@ -926,7 +882,7 @@ BUILTIN(ArraySplice) {
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
elms_changed = true;
array->set_elements(elms);
} else {
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc,
@ -942,10 +898,6 @@ BUILTIN(ArraySplice) {
elms->set(k, args[3 + k - actual_start], mode);
}
if (elms_changed) {
array->set_elements(elms);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
@ -968,7 +920,7 @@ BUILTIN(ArrayConcat) {
int result_len = 0;
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
|| JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
@ -1004,17 +956,6 @@ BUILTIN(ArrayConcat) {
}
FixedArray* result_elms = FixedArray::cast(result);
// Ensure element type transitions happen before copying elements in.
if (result_array->HasFastSmiOnlyElements()) {
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
if (!array->HasFastSmiOnlyElements()) {
result_array->EnsureCanContainNonSmiElements();
break;
}
}
}
// Copy data.
AssertNoAllocation no_gc;
int start_pos = 0;
@ -1666,22 +1607,20 @@ void Builtins::Setup(bool create_heap_objects) {
const BuiltinDesc* functions = BuiltinFunctionTable::functions();
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects. Be careful
// with alignment, some platforms don't like unaligned code.
union { int force_alignment; byte buffer[4*KB]; } u;
// buffer, before copying it into individual code objects.
byte buffer[4*KB];
// Traverse the list of builtins and generate an adaptor in a
// separate code object for each one.
for (int i = 0; i < builtin_count; i++) {
if (create_heap_objects) {
MacroAssembler masm(isolate, u.buffer, sizeof u.buffer);
MacroAssembler masm(isolate, buffer, sizeof buffer);
// Generate the code/adaptor.
typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
// We pass all arguments to the generator, but it may not use all of
// them. This works because the first arguments are on top of the
// stack.
ASSERT(!masm.has_frame());
g(&masm, functions[i].name, functions[i].extra_args);
// Move the code into the object heap.
CodeDesc desc;
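The pre-downgrade lines above wrap the stack buffer in a small union so the memory that adaptor code is assembled into gets at least int alignment; the downgraded code goes back to a plain byte array. A standalone illustration of that alignment idiom (sizes arbitrary, not V8 code):

// aligned_buffer_sketch.cc -- standalone illustration of the union idiom used
// above to give a stack byte buffer at least the alignment of int.
#include <cassert>
#include <cstdint>

int main() {
  union { int force_alignment; unsigned char buffer[4 * 1024]; } u;
  // The array member inherits the union's alignment requirement.
  assert(reinterpret_cast<std::uintptr_t>(u.buffer) % alignof(int) == 0);
  u.buffer[0] = 0;  // the real code assembles adaptor code into this buffer
  return 0;
}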

12
deps/v8/src/cached-powers.cc

@ -134,12 +134,14 @@ static const CachedPower kCachedPowers[] = {
};
static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent.
static const int kCachedPowersOffset = -kCachedPowers[0].decimal_exponent;
static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
// Difference between the decimal exponents in the table above.
const int PowersOfTenCache::kDecimalExponentDistance = 8;
const int PowersOfTenCache::kMinDecimalExponent = -348;
const int PowersOfTenCache::kMaxDecimalExponent = 340;
const int PowersOfTenCache::kDecimalExponentDistance =
kCachedPowers[1].decimal_exponent - kCachedPowers[0].decimal_exponent;
const int PowersOfTenCache::kMinDecimalExponent =
kCachedPowers[0].decimal_exponent;
const int PowersOfTenCache::kMaxDecimalExponent =
kCachedPowers[kCachedPowersLength - 1].decimal_exponent;
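The downgrade replaces these table-derived constants with hard-coded values (offset 348, exponent range -348..340, spacing 8). A standalone sketch, assuming that spacing, of how a decimal exponent maps to a table index (illustration only, not the V8 lookup routine):

// powers_index_sketch.cc -- standalone sketch of indexing a powers-of-ten
// table whose entries are 8 decimal exponents apart, starting at -348.
#include <cassert>
#include <cstdio>

static const int kMinDecimalExponent = -348;
static const int kDecimalExponentDistance = 8;

// Index of the first cached entry whose decimal exponent is >= requested.
static int CachedIndexFor(int requested_exponent) {
  int offset = requested_exponent - kMinDecimalExponent;
  return (offset + kDecimalExponentDistance - 1) / kDecimalExponentDistance;
}

int main() {
  assert(CachedIndexFor(-348) == 0);   // exact hit on the first entry
  assert(CachedIndexFor(-347) == 1);   // rounds up to the next entry
  assert(CachedIndexFor(-340) == 1);
  std::printf("index for decimal exponent 0: %d\n", CachedIndexFor(0));  // 44
  return 0;
}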
void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
int min_exponent,

45
deps/v8/src/code-stubs.cc

@ -52,12 +52,11 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
masm->isolate()->counters()->code_stubs()->Increment();
// Nested stubs are not allowed for leaves.
AllowStubCallsScope allow_scope(masm, false);
// Nested stubs are not allowed for leafs.
AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
// Generate the code for the stub.
masm->set_generating_stub(true);
NoCurrentFrameScope scope(masm);
Generate(masm);
}
@ -128,10 +127,8 @@ Handle<Code> CodeStub::GetCode() {
GetKey(),
new_object);
heap->public_set_code_stubs(*dict);
code = *new_object;
Activate(code);
} else {
CHECK(IsPregenerated() == code->is_pregenerated());
}
ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
@ -169,11 +166,7 @@ MaybeObject* CodeStub::TryGetCode() {
heap->code_stubs()->AtNumberPut(GetKey(), code);
if (maybe_new_object->ToObject(&new_object)) {
heap->public_set_code_stubs(NumberDictionary::cast(new_object));
} else if (MustBeInStubCache()) {
return maybe_new_object;
}
Activate(code);
}
return code;
@ -195,11 +188,6 @@ const char* CodeStub::MajorName(CodeStub::Major major_key,
}
void CodeStub::PrintName(StringStream* stream) {
stream->Add("%s", MajorName(MajorKey(), false));
}
int ICCompareStub::MinorKey() {
return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
}
@ -257,7 +245,6 @@ void InstanceofStub::PrintName(StringStream* stream) {
void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
break;
case FAST_DOUBLE_ELEMENTS:
@ -287,11 +274,7 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS: {
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_js_array_,
elements_kind_);
}
KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
break;
case FAST_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
@ -319,20 +302,24 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::PrintName(StringStream* stream) {
stream->Add("ArgumentsAccessStub_");
const char* type_name = NULL; // Make g++ happy.
switch (type_) {
case READ_ELEMENT: stream->Add("ReadElement"); break;
case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break;
case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break;
case NEW_STRICT: stream->Add("NewStrict"); break;
case READ_ELEMENT: type_name = "ReadElement"; break;
case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break;
case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break;
case NEW_STRICT: type_name = "NewStrict"; break;
}
stream->Add("ArgumentsAccessStub_%s", type_name);
}
void CallFunctionStub::PrintName(StringStream* stream) {
stream->Add("CallFunctionStub_Args%d", argc_);
if (ReceiverMightBeImplicit()) stream->Add("_Implicit");
if (RecordCallTarget()) stream->Add("_Recording");
const char* flags_name = NULL; // Make g++ happy.
switch (flags_) {
case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break;
case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break;
}
stream->Add("CallFunctionStub_Args%d%s", argc_, flags_name);
}

117
deps/v8/src/code-stubs.h

@ -45,23 +45,27 @@ namespace internal {
V(Compare) \
V(CompareIC) \
V(MathPow) \
V(RecordWrite) \
V(StoreBufferOverflow) \
V(RegExpExec) \
V(TranscendentalCache) \
V(Instanceof) \
/* All stubs above this line only exist in a few versions, which are */ \
/* generated ahead of time. Therefore compiling a call to one of */ \
/* them can't cause a new stub to be compiled, so compiling a call to */ \
/* them is GC safe. The ones below this line exist in many variants */ \
/* so code compiling a call to one can cause a GC. This means they */ \
/* can't be called from other stubs, since stub generation code is */ \
/* not GC safe. */ \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
V(StackCheck) \
V(FastNewClosure) \
V(FastNewContext) \
V(FastNewBlockContext) \
V(FastCloneShallowArray) \
V(RevertToNumber) \
V(ToBoolean) \
V(ToNumber) \
V(CounterOp) \
V(ArgumentsAccess) \
V(RegExpExec) \
V(RegExpConstructResult) \
V(NumberToString) \
V(CEntry) \
@ -69,7 +73,7 @@ namespace internal {
V(KeyedLoadElement) \
V(KeyedStoreElement) \
V(DebuggerStatement) \
V(StringDictionaryLookup)
V(StringDictionaryNegativeLookup)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@ -138,27 +142,6 @@ class CodeStub BASE_EMBEDDED {
virtual ~CodeStub() {}
bool CompilingCallsToThisStubIsGCSafe() {
bool is_pregenerated = IsPregenerated();
Code* code = NULL;
CHECK(!is_pregenerated || FindCodeInCache(&code));
return is_pregenerated;
}
// See comment above, where Instanceof is defined.
virtual bool IsPregenerated() { return false; }
static void GenerateStubsAheadOfTime();
static void GenerateFPStubs();
// Some stubs put untagged junk on the stack that cannot be scanned by the
// GC. This means that we must be statically sure that no GC can occur while
// they are running. If that is the case they should override this to return
// true, which will cause an assertion if we try to call something that can
// GC or if we try to put a stack frame on top of the junk, which would not
// result in a traversable stack.
virtual bool SometimesSetsUpAFrame() { return true; }
protected:
static const int kMajorBits = 6;
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
@ -181,14 +164,6 @@ class CodeStub BASE_EMBEDDED {
// Finish the code object after it has been generated.
virtual void FinishCode(Code* code) { }
// Returns true if TryGetCode should fail if it failed
// to register newly generated stub in the stub cache.
virtual bool MustBeInStubCache() { return false; }
// Activate newly generated stub. Is called after
// registering stub in the stub cache.
virtual void Activate(Code* code) { }
// Returns information for computing the number key.
virtual Major MajorKey() = 0;
virtual int MinorKey() = 0;
@ -203,7 +178,9 @@ class CodeStub BASE_EMBEDDED {
// Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName();
virtual void PrintName(StringStream* stream);
virtual void PrintName(StringStream* stream) {
stream->Add("%s", MajorName(MajorKey(), false));
}
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
@ -216,6 +193,9 @@ class CodeStub BASE_EMBEDDED {
MajorKeyBits::encode(MajorKey());
}
// See comment above, where Instanceof is defined.
bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
@ -324,7 +304,7 @@ class FastNewContextStub : public CodeStub {
static const int kMaximumSlots = 64;
explicit FastNewContextStub(int slots) : slots_(slots) {
ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
ASSERT(slots_ > 0 && slots <= kMaximumSlots);
}
void Generate(MacroAssembler* masm);
@ -337,24 +317,6 @@ class FastNewContextStub : public CodeStub {
};
class FastNewBlockContextStub : public CodeStub {
public:
static const int kMaximumSlots = 64;
explicit FastNewBlockContextStub(int slots) : slots_(slots) {
ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
}
void Generate(MacroAssembler* masm);
private:
int slots_;
Major MajorKey() { return FastNewBlockContext; }
int MinorKey() { return slots_; }
};
class FastCloneShallowArrayStub : public CodeStub {
public:
// Maximum length of copied elements array.
@ -569,18 +531,11 @@ class CompareStub: public CodeStub {
class CEntryStub : public CodeStub {
public:
explicit CEntryStub(int result_size,
SaveFPRegsMode save_doubles = kDontSaveFPRegs)
: result_size_(result_size), save_doubles_(save_doubles) { }
explicit CEntryStub(int result_size)
: result_size_(result_size), save_doubles_(false) { }
void Generate(MacroAssembler* masm);
// The version of this stub that doesn't save doubles is generated ahead of
// time, so it's OK to call it from other stubs that can't cope with GC during
// their code generation. On machines that always have gp registers (x64) we
// can generate both variants ahead of time.
virtual bool IsPregenerated();
static void GenerateAheadOfTime();
void SaveDoubles() { save_doubles_ = true; }
private:
void GenerateCore(MacroAssembler* masm,
@ -595,7 +550,7 @@ class CEntryStub : public CodeStub {
// Number of pointers/values returned.
const int result_size_;
SaveFPRegsMode save_doubles_;
bool save_doubles_;
Major MajorKey() { return CEntry; }
int MinorKey();
@ -692,32 +647,10 @@ class CallFunctionStub: public CodeStub {
void Generate(MacroAssembler* masm);
virtual void FinishCode(Code* code);
static void Clear(Heap* heap, Address address);
static Object* GetCachedValue(Address address);
static int ExtractArgcFromMinorKey(int minor_key) {
return ArgcBits::decode(minor_key);
}
// The object that indicates an uninitialized cache.
static Handle<Object> UninitializedSentinel(Isolate* isolate) {
return isolate->factory()->the_hole_value();
}
// A raw version of the uninitialized sentinel that's safe to read during
// garbage collection (e.g., for patching the cache).
static Object* RawUninitializedSentinel(Heap* heap) {
return heap->raw_unchecked_the_hole_value();
}
// The object that indicates a megamorphic state.
static Handle<Object> MegamorphicSentinel(Isolate* isolate) {
return isolate->factory()->undefined_value();
}
private:
int argc_;
CallFunctionFlags flags_;
@ -725,8 +658,8 @@ class CallFunctionStub: public CodeStub {
virtual void PrintName(StringStream* stream);
// Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
class FlagBits: public BitField<CallFunctionFlags, 0, 2> {};
class ArgcBits: public BitField<unsigned, 2, 32 - 2> {};
class FlagBits: public BitField<CallFunctionFlags, 0, 1> {};
class ArgcBits: public BitField<unsigned, 1, 32 - 1> {};
Major MajorKey() { return CallFunction; }
int MinorKey() {
@ -737,10 +670,6 @@ class CallFunctionStub: public CodeStub {
bool ReceiverMightBeImplicit() {
return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
}
bool RecordCallTarget() {
return (flags_ & RECORD_CALL_TARGET) != 0;
}
};
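The downgraded FlagBits/ArgcBits above shrink the flag field from two bits back to one (RECORD_CALL_TARGET goes away), so the minor key packs one flag bit plus a 31-bit argument count. A standalone mimic of that bit-field packing (not V8's BitField template):

// bitfield_sketch.cc -- standalone mimic of packing a one-bit flag and a
// 31-bit argc into a stub's minor key, as in FlagBits/ArgcBits above.
#include <cassert>
#include <stdint.h>

template <int shift, int size>
struct BitFieldSketch {
  static uint32_t encode(uint32_t value) { return value << shift; }
  static uint32_t decode(uint32_t packed) {
    uint32_t mask = ((1u << size) - 1u) << shift;
    return (packed & mask) >> shift;
  }
};

typedef BitFieldSketch<0, 1>      FlagBits;  // one flag bit
typedef BitFieldSketch<1, 32 - 1> ArgcBits;  // remaining 31 bits for argc

int main() {
  uint32_t minor_key = FlagBits::encode(1) | ArgcBits::encode(5);
  assert(FlagBits::decode(minor_key) == 1);
  assert(ArgcBits::decode(minor_key) == 5);
  return 0;
}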
@ -1005,8 +934,6 @@ class ToBooleanStub: public CodeStub {
virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; }
virtual void PrintName(StringStream* stream);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
Major MajorKey() { return ToBoolean; }
int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }

2
deps/v8/src/codegen.cc

@ -218,8 +218,8 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
int CEntryStub::MinorKey() {
int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0;
ASSERT(result_size_ == 1 || result_size_ == 2);
int result = save_doubles_ ? 1 : 0;
#ifdef _WIN64
return result | ((result_size_ == 1) ? 0 : 2);
#else

77
deps/v8/src/compiler-intrinsics.h

@ -1,77 +0,0 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_COMPILER_INTRINSICS_H_
#define V8_COMPILER_INTRINSICS_H_
namespace v8 {
namespace internal {
class CompilerIntrinsics {
public:
// Returns number of zero bits preceding least significant 1 bit.
// Undefined for zero value.
INLINE(static int CountTrailingZeros(uint32_t value));
// Returns number of zero bits following most significant 1 bit.
// Undefined for zero value.
INLINE(static int CountLeadingZeros(uint32_t value));
};
#ifdef __GNUC__
int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
return __builtin_ctz(value);
}
int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
return __builtin_clz(value);
}
#elif defined(_MSC_VER)
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
unsigned long result; //NOLINT
_BitScanForward(&result, static_cast<long>(value)); //NOLINT
return static_cast<int>(result);
}
int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
unsigned long result; //NOLINT
_BitScanReverse(&result, static_cast<long>(value)); //NOLINT
return 31 - static_cast<int>(result);
}
#else
#error Unsupported compiler
#endif
} } // namespace v8::internal
#endif // V8_COMPILER_INTRINSICS_H_
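The removed header above wraps the compiler's bit-scan builtins. As a quick reference for what they compute (GCC/Clang builtins assumed, matching the __GNUC__ branch):

// ctz_clz_sketch.cc -- standalone check of the bit-scan builtins wrapped by
// the removed compiler-intrinsics.h above (GCC/Clang assumed).
#include <cassert>
#include <stdint.h>

int main() {
  uint32_t value = 0x8;                // binary 1000
  assert(__builtin_ctz(value) == 3);   // zero bits below the lowest set bit
  assert(__builtin_clz(value) == 28);  // zero bits above the highest set bit
  // Both are undefined for value == 0, as the removed header documents.
  return 0;
}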

13
deps/v8/src/compiler.cc

@ -36,7 +36,6 @@
#include "full-codegen.h"
#include "gdb-jit.h"
#include "hydrogen.h"
#include "isolate-inl.h"
#include "lithium.h"
#include "liveedit.h"
#include "parser.h"
@ -276,7 +275,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
}
Handle<Context> global_context(info->closure()->context()->global_context());
TypeFeedbackOracle oracle(code, global_context, info->isolate());
TypeFeedbackOracle oracle(code, global_context);
HGraphBuilder builder(info, &oracle);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph();
@ -480,7 +479,8 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
// that would be compiled lazily anyway, so we skip the preparse step
// in that case too.
ScriptDataImpl* pre_data = input_pre_data;
bool harmony_scoping = natives != NATIVES_CODE && FLAG_harmony_scoping;
bool harmony_block_scoping = natives != NATIVES_CODE &&
FLAG_harmony_block_scoping;
if (pre_data == NULL
&& source_length >= FLAG_min_preparse_length) {
if (source->IsExternalTwoByteString()) {
@ -488,12 +488,12 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
pre_data = ParserApi::PartialPreParse(&stream,
extension,
harmony_scoping);
harmony_block_scoping);
} else {
GenericStringUC16CharacterStream stream(source, 0, source->length());
pre_data = ParserApi::PartialPreParse(&stream,
extension,
harmony_scoping);
harmony_block_scoping);
}
}
@ -516,9 +516,6 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
info.MarkAsGlobal();
info.SetExtension(extension);
info.SetPreParseData(pre_data);
if (natives == NATIVES_CODE) {
info.MarkAsAllowingNativesSyntax();
}
result = MakeFunctionInfo(&info);
if (extension == NULL && !result.is_null()) {
compilation_cache->PutScript(source, result);

8
deps/v8/src/compiler.h

@ -83,12 +83,6 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(is_lazy());
flags_ |= IsInLoop::encode(true);
}
void MarkAsAllowingNativesSyntax() {
flags_ |= IsNativesSyntaxAllowed::encode(true);
}
bool allows_natives_syntax() const {
return IsNativesSyntaxAllowed::decode(flags_);
}
void MarkAsNative() {
flags_ |= IsNative::encode(true);
}
@ -199,8 +193,6 @@ class CompilationInfo BASE_EMBEDDED {
class IsInLoop: public BitField<bool, 3, 1> {};
// Strict mode - used in eager compilation.
class IsStrictMode: public BitField<bool, 4, 1> {};
// Native syntax (%-stuff) allowed?
class IsNativesSyntaxAllowed: public BitField<bool, 5, 1> {};
// Is this a function from our natives.
class IsNative: public BitField<bool, 6, 1> {};

108
deps/v8/src/contexts.cc

@ -86,14 +86,14 @@ void Context::set_global_proxy(JSObject* object) {
Handle<Object> Context::Lookup(Handle<String> name,
ContextLookupFlags flags,
int* index,
int* index_,
PropertyAttributes* attributes,
BindingFlags* binding_flags) {
Isolate* isolate = GetIsolate();
Handle<Context> context(this, isolate);
bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
*index = -1;
*index_ = -1;
*attributes = ABSENT;
*binding_flags = MISSING_BINDING;
@ -110,50 +110,70 @@ Handle<Object> Context::Lookup(Handle<String> name,
PrintF("\n");
}
// 1. Check global objects, subjects of with, and extension objects.
if (context->IsGlobalContext() ||
context->IsWithContext() ||
(context->IsFunctionContext() && context->has_extension())) {
Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
// Check extension/with/global object.
if (!context->IsBlockContext() && context->has_extension()) {
if (context->IsCatchContext()) {
// Catch contexts have the variable name in the extension slot.
if (name->Equals(String::cast(context->extension()))) {
if (FLAG_trace_contexts) {
PrintF("=> found in catch context\n");
}
*index_ = Context::THROWN_OBJECT_INDEX;
*attributes = NONE;
*binding_flags = MUTABLE_IS_INITIALIZED;
return context;
}
} else {
ASSERT(context->IsGlobalContext() ||
context->IsFunctionContext() ||
context->IsWithContext());
// Global, function, and with contexts may have an object in the
// extension slot.
Handle<JSObject> extension(JSObject::cast(context->extension()),
isolate);
// Context extension objects need to behave as if they have no
// prototype. So even if we want to follow prototype chains, we need
// to only do a local lookup for context extension objects.
// prototype. So even if we want to follow prototype chains, we
// need to only do a local lookup for context extension objects.
if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
object->IsJSContextExtensionObject()) {
*attributes = object->GetLocalPropertyAttribute(*name);
extension->IsJSContextExtensionObject()) {
*attributes = extension->GetLocalPropertyAttribute(*name);
} else {
*attributes = object->GetPropertyAttribute(*name);
*attributes = extension->GetPropertyAttribute(*name);
}
if (*attributes != ABSENT) {
// property found
if (FLAG_trace_contexts) {
PrintF("=> found property in context object %p\n",
reinterpret_cast<void*>(*object));
reinterpret_cast<void*>(*extension));
}
return extension;
}
return object;
}
}
// 2. Check the context proper if it has slots.
// Check serialized scope information of functions and blocks. Only
// functions can have parameters, and a function name.
if (context->IsFunctionContext() || context->IsBlockContext()) {
// Use serialized scope information of functions and blocks to search
// for the context index.
// We may have context-local slots. Check locals in the context.
Handle<SerializedScopeInfo> scope_info;
if (context->IsFunctionContext()) {
scope_info = Handle<SerializedScopeInfo>(
context->closure()->shared()->scope_info(), isolate);
} else {
ASSERT(context->IsBlockContext());
scope_info = Handle<SerializedScopeInfo>(
SerializedScopeInfo::cast(context->extension()), isolate);
}
VariableMode mode;
int slot_index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
if (slot_index >= 0) {
Variable::Mode mode;
int index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
if (index >= 0) {
if (FLAG_trace_contexts) {
PrintF("=> found local in context slot %d (mode = %d)\n",
slot_index, mode);
index, mode);
}
*index = slot_index;
*index_ = index;
// Note: Fixed context slots are statically allocated by the compiler.
// Statically allocated variables always have a statically known mode,
// which is the mode with which they were declared when added to the
@ -161,23 +181,23 @@ Handle<Object> Context::Lookup(Handle<String> name,
// declared variables that were introduced through declaration nodes)
// must not appear here.
switch (mode) {
case INTERNAL: // Fall through.
case VAR:
case Variable::INTERNAL: // Fall through.
case Variable::VAR:
*attributes = NONE;
*binding_flags = MUTABLE_IS_INITIALIZED;
break;
case LET:
case Variable::LET:
*attributes = NONE;
*binding_flags = MUTABLE_CHECK_INITIALIZED;
break;
case CONST:
case Variable::CONST:
*attributes = READ_ONLY;
*binding_flags = IMMUTABLE_CHECK_INITIALIZED;
break;
case DYNAMIC:
case DYNAMIC_GLOBAL:
case DYNAMIC_LOCAL:
case TEMPORARY:
case Variable::DYNAMIC:
case Variable::DYNAMIC_GLOBAL:
case Variable::DYNAMIC_LOCAL:
case Variable::TEMPORARY:
UNREACHABLE();
break;
}
@ -186,34 +206,22 @@ Handle<Object> Context::Lookup(Handle<String> name,
// Check the slot corresponding to the intermediate context holding
// only the function name variable.
if (follow_context_chain && context->IsFunctionContext()) {
int function_index = scope_info->FunctionContextSlotIndex(*name);
if (function_index >= 0) {
if (follow_context_chain) {
int index = scope_info->FunctionContextSlotIndex(*name);
if (index >= 0) {
if (FLAG_trace_contexts) {
PrintF("=> found intermediate function in context slot %d\n",
function_index);
index);
}
*index = function_index;
*index_ = index;
*attributes = READ_ONLY;
*binding_flags = IMMUTABLE_IS_INITIALIZED;
return context;
}
}
} else if (context->IsCatchContext()) {
// Catch contexts have the variable name in the extension slot.
if (name->Equals(String::cast(context->extension()))) {
if (FLAG_trace_contexts) {
PrintF("=> found in catch context\n");
}
*index = Context::THROWN_OBJECT_INDEX;
*attributes = NONE;
*binding_flags = MUTABLE_IS_INITIALIZED;
return context;
}
}
// 3. Prepare to continue with the previous (next outermost) context.
// Proceed with the previous context.
if (context->IsGlobalContext()) {
follow_context_chain = false;
} else {
@ -245,7 +253,7 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
// Check non-parameter locals.
Handle<SerializedScopeInfo> scope_info(
context->closure()->shared()->scope_info());
VariableMode mode;
Variable::Mode mode;
int index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
if (index >= 0) return false;

41
deps/v8/src/contexts.h

@ -134,8 +134,6 @@ enum BindingFlags {
V(MAP_CACHE_INDEX, Object, map_cache) \
V(CONTEXT_DATA_INDEX, Object, data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
@ -254,7 +252,6 @@ class Context: public FixedArray {
OUT_OF_MEMORY_INDEX,
CONTEXT_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
DERIVED_SET_TRAP_INDEX,
@ -333,6 +330,12 @@ class Context: public FixedArray {
// Mark the global context with out of memory.
inline void mark_out_of_memory();
// The exception holder is the object used as a with object in
// the implementation of a catch block.
bool is_exception_holder(Object* object) {
return IsCatchContext() && extension() == object;
}
// A global context holds a list of all functions which have been optimized.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
@ -352,25 +355,29 @@ class Context: public FixedArray {
#undef GLOBAL_CONTEXT_FIELD_ACCESSORS
// Lookup the slot called name, starting with the current context.
// There are three possibilities:
// There are 4 possible outcomes:
//
// 1) index_ >= 0 && result->IsContext():
// most common case, the result is a Context, and index is the
// context slot index, and the slot exists.
// attributes == READ_ONLY for the function name variable, NONE otherwise.
//
// 1) result->IsContext():
// The binding was found in a context. *index is always the
// non-negative slot index. *attributes is NONE for var and let
// declarations, READ_ONLY for const declarations (never ABSENT).
// 2) index_ >= 0 && result->IsJSObject():
// the result is the JSObject arguments object, the index is the parameter
// index, i.e., key into the arguments object, and the property exists.
// attributes != ABSENT.
//
// 2) result->IsJSObject():
// The binding was found as a named property in a context extension
// object (i.e., was introduced via eval), as a property on the subject
// of with, or as a property of the global object. *index is -1 and
// *attributes is not ABSENT.
// 3) index_ < 0 && result->IsJSObject():
// the result is the JSObject extension context or the global object,
// and the name is the property name, and the property exists.
// attributes != ABSENT.
//
// 3) result.is_null():
// There was no binding found, *index is always -1 and *attributes is
// always ABSENT.
// 4) index_ < 0 && result.is_null():
// there was no context found with the corresponding property.
// attributes == ABSENT.
Handle<Object> Lookup(Handle<String> name,
ContextLookupFlags flags,
int* index,
int* index_,
PropertyAttributes* attributes,
BindingFlags* binding_flags);
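The comment above documents Lookup's contract purely through its out-parameters and return value. A standalone analogue of the caller-side branching (hypothetical toy types, not V8's Context/Handle API):

// lookup_contract_sketch.cc -- standalone analogue of the contract above: the
// returned holder plus an index out-parameter distinguish "found in a context
// slot" from "found as a named property" from "not found".
#include <cassert>
#include <map>
#include <string>

struct FakeContext { std::map<std::string, int> slots; };       // stands in for Context
struct FakeObject  { std::map<std::string, int> properties; };  // stands in for a JSObject

// Returns the context if the name is a slot (index >= 0), the extension object
// if it is a named property (index stays -1), or null if absent.
static void* Lookup(FakeContext* context, FakeObject* extension,
                    const std::string& name, int* index) {
  *index = -1;
  std::map<std::string, int>::iterator slot = context->slots.find(name);
  if (slot != context->slots.end()) { *index = slot->second; return context; }
  if (extension->properties.count(name)) return extension;
  return 0;
}

int main() {
  FakeContext context;  context.slots["x"] = 2;
  FakeObject extension; extension.properties["y"] = 7;
  int index = -1;

  assert(Lookup(&context, &extension, "x", &index) == &context && index == 2);
  assert(Lookup(&context, &extension, "y", &index) == &extension && index == -1);
  assert(Lookup(&context, &extension, "z", &index) == 0 && index == -1);
  return 0;
}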

2
deps/v8/src/conversions-inl.h

@ -47,7 +47,7 @@ namespace v8 {
namespace internal {
static inline double JunkStringValue() {
return BitCast<double, uint64_t>(kQuietNaNMask);
return std::numeric_limits<double>::quiet_NaN();
}
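The downgraded JunkStringValue above builds its NaN from std::numeric_limits instead of bit-casting a quiet-NaN mask. Both routes produce a quiet NaN; a standalone illustration (the bit pattern below is the usual IEEE-754 quiet-NaN encoding, assumed rather than taken from the diff):

// quiet_nan_sketch.cc -- standalone illustration that a bit-cast quiet-NaN
// pattern and std::numeric_limits<double>::quiet_NaN() both test as NaN.
#include <cassert>
#include <cmath>
#include <cstring>
#include <limits>
#include <stdint.h>

int main() {
  double from_limits = std::numeric_limits<double>::quiet_NaN();

  uint64_t quiet_bits = 0x7FF8000000000000ULL;  // exponent all ones, top mantissa bit set
  double from_bits;
  std::memcpy(&from_bits, &quiet_bits, sizeof from_bits);  // portable stand-in for BitCast

  assert(std::isnan(from_limits));
  assert(std::isnan(from_bits));
  return 0;
}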

2
deps/v8/src/conversions.h

@ -28,6 +28,8 @@
#ifndef V8_CONVERSIONS_H_
#define V8_CONVERSIONS_H_
#include <limits>
#include "utils.h"
namespace v8 {

5
deps/v8/src/d8-debug.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef ENABLE_DEBUGGER_SUPPORT
#include "d8.h"
#include "d8-debug.h"
@ -368,5 +367,3 @@ void KeyboardThread::Run() {
} // namespace v8
#endif // ENABLE_DEBUGGER_SUPPORT

32
deps/v8/src/d8.cc

@ -146,11 +146,11 @@ bool Shell::ExecuteString(Handle<String> source,
Handle<Value> name,
bool print_result,
bool report_exceptions) {
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
#ifndef V8_SHARED
bool FLAG_debugger = i::FLAG_debugger;
#else
bool FLAG_debugger = false;
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
#endif // V8_SHARED
HandleScope handle_scope;
TryCatch try_catch;
options.script_executed = true;
@ -594,7 +594,6 @@ void Shell::InstallUtilityScript() {
Context::Scope utility_scope(utility_context_);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
// Install the debugger object in the utility scope
i::Debug* debug = i::Isolate::Current()->debug();
debug->Load();
@ -817,7 +816,7 @@ void Shell::OnExit() {
static FILE* FOpen(const char* path, const char* mode) {
#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
#if (defined(_WIN32) || defined(_WIN64))
FILE* result;
if (fopen_s(&result, path, mode) == 0) {
return result;
@ -901,6 +900,9 @@ void Shell::RunShell() {
#ifndef V8_SHARED
console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
if (i::FLAG_debugger) {
printf("JavaScript debugger enabled\n");
}
console->Open();
while (true) {
i::SmartArrayPointer<char> input = console->Prompt(Shell::kPrompt);
@ -1251,22 +1253,14 @@ int Shell::RunMain(int argc, char* argv[]) {
Locker lock;
HandleScope scope;
Persistent<Context> context = CreateEvaluationContext();
if (options.last_run) {
// Keep using the same context in the interactive shell.
evaluation_context_ = context;
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
// If the interactive debugger is enabled make sure to activate
// it before running the files passed on the command line.
if (i::FLAG_debugger) {
InstallUtilityScript();
}
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
}
{
Context::Scope cscope(context);
options.isolate_sources[0].Execute();
}
if (!options.last_run) {
if (options.last_run) {
// Keep using the same context in the interactive shell
evaluation_context_ = context;
} else {
context.Dispose();
}
@ -1337,11 +1331,9 @@ int Shell::Main(int argc, char* argv[]) {
if (( options.interactive_shell
|| !options.script_executed )
&& !options.test_shell ) {
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
if (!i::FLAG_debugger) {
#ifndef V8_SHARED
InstallUtilityScript();
}
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
#endif // V8_SHARED
RunShell();
}

224
deps/v8/src/debug.cc

@ -40,7 +40,6 @@
#include "global-handles.h"
#include "ic.h"
#include "ic-inl.h"
#include "isolate-inl.h"
#include "list.h"
#include "messages.h"
#include "natives.h"
@ -402,15 +401,15 @@ void BreakLocationIterator::PrepareStepIn() {
// Step in can only be prepared if currently positioned on an IC call,
// construct call or CallFunction stub call.
Address target = rinfo()->target_address();
Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) {
Handle<Code> code(Code::GetCodeFromTargetAddress(target));
if (code->is_call_stub() || code->is_keyed_call_stub()) {
// Step in through IC call is handled by the runtime system. Therefore make
// sure that the any current IC is cleared and the runtime system is
// called. If the executing code has a debug break at the location change
// the call in the original code as it is the code there that will be
// executed in place of the debug break call.
Handle<Code> stub = ComputeCallDebugPrepareStepIn(
target_code->arguments_count(), target_code->kind());
Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count(),
code->kind());
if (IsDebugBreak()) {
original_rinfo()->set_target_address(stub->entry());
} else {
@ -420,7 +419,7 @@ void BreakLocationIterator::PrepareStepIn() {
#ifdef DEBUG
// All the following stuff is needed only for assertion checks so the code
// is wrapped in ifdef.
Handle<Code> maybe_call_function_stub = target_code;
Handle<Code> maybe_call_function_stub = code;
if (IsDebugBreak()) {
Address original_target = original_rinfo()->target_address();
maybe_call_function_stub =
@ -437,9 +436,8 @@ void BreakLocationIterator::PrepareStepIn() {
// Step in through CallFunction stub should also be prepared by caller of
// this function (Debug::PrepareStep) which should flood target function
// with breakpoints.
ASSERT(RelocInfo::IsConstructCall(rmode()) ||
target_code->is_inline_cache_stub() ||
is_call_function_stub);
ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub()
|| is_call_function_stub);
#endif
}
}
@ -476,11 +474,11 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
RelocInfo::Mode mode = rmode();
if (RelocInfo::IsCodeTarget(mode)) {
Address target = rinfo()->target_address();
Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
Handle<Code> code(Code::GetCodeFromTargetAddress(target));
// Patch the code to invoke the builtin debug break function matching the
// calling convention used by the call site.
Handle<Code> dbgbrk_code(Debug::FindDebugBreak(target_code, mode));
Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
rinfo()->set_target_address(dbgbrk_code->entry());
}
}
@ -774,7 +772,7 @@ bool Debug::CompileDebuggerScript(int index) {
// Execute the shared function in the debugger context.
Handle<Context> context = isolate->global_context();
bool caught_exception;
bool caught_exception = false;
Handle<JSFunction> function =
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
@ -1105,13 +1103,14 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
// Call HandleBreakPointx.
bool caught_exception;
Handle<Object> argv[] = { break_id, break_point_object };
bool caught_exception = false;
const int argc = 2;
Object** argv[argc] = {
break_id.location(),
reinterpret_cast<Object**>(break_point_object.location())
};
Handle<Object> result = Execution::TryCall(check_break_point,
isolate_->js_builtins_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
isolate_->js_builtins_object(), argc, argv, &caught_exception);
// If exception or non boolean result handle as not triggered
if (caught_exception || !result->IsBoolean()) {
@ -1733,10 +1732,6 @@ void Debug::PrepareForBreakPoints() {
if (!has_break_points_) {
Deoptimizer::DeoptimizeAll();
// We are going to iterate heap to find all functions without
// debug break slots.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
AssertNoAllocation no_allocation;
Builtins* builtins = isolate_->builtins();
Code* lazy_compile = builtins->builtin(Builtins::kLazyCompile);
@ -2002,10 +1997,9 @@ void Debug::CreateScriptCache() {
// Perform two GCs to get rid of all unreferenced scripts. The first GC gets
// rid of all the cached script wrappers and the second gets rid of the
// scripts which are no longer referenced. The second also sweeps precisely,
// which saves us doing yet another GC to make the heap iterable.
heap->CollectAllGarbage(Heap::kNoGCFlags);
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);
// scripts which are no longer referenced.
heap->CollectAllGarbage(false);
heap->CollectAllGarbage(false);
ASSERT(script_cache_ == NULL);
script_cache_ = new ScriptCache();
@ -2013,8 +2007,6 @@ void Debug::CreateScriptCache() {
// Scan heap for Script objects.
int count = 0;
HeapIterator iterator;
AssertNoAllocation no_allocation;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
script_cache_->Add(Handle<Script>(Script::cast(obj)));
@ -2055,7 +2047,7 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
// Perform GC to get unreferenced scripts evicted from the cache before
// returning the content.
isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags);
isolate_->heap()->CollectAllGarbage(false);
// Get the scripts from the cache.
return script_cache_->GetScripts();
@ -2101,8 +2093,7 @@ Debugger::~Debugger() {
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
int argc,
Handle<Object> argv[],
int argc, Object*** argv,
bool* caught_exception) {
ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
@ -2119,9 +2110,7 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
Handle<Object> js_object = Execution::TryCall(
Handle<JSFunction>::cast(constructor),
Handle<JSObject>(isolate_->debug()->debug_context()->global()),
argc,
argv,
caught_exception);
argc, argv, caught_exception);
return js_object;
}
@ -2130,11 +2119,10 @@ Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
// Create the execution state object.
Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
isolate_->debug()->break_id());
Handle<Object> argv[] = { break_id };
const int argc = 1;
Object** argv[argc] = { break_id.location() };
return MakeJSObject(CStrVector("MakeExecutionState"),
ARRAY_SIZE(argv),
argv,
caught_exception);
argc, argv, caught_exception);
}
@ -2142,9 +2130,11 @@ Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state,
Handle<Object> break_points_hit,
bool* caught_exception) {
// Create the new break event object.
Handle<Object> argv[] = { exec_state, break_points_hit };
const int argc = 2;
Object** argv[argc] = { exec_state.location(),
break_points_hit.location() };
return MakeJSObject(CStrVector("MakeBreakEvent"),
ARRAY_SIZE(argv),
argc,
argv,
caught_exception);
}
@ -2156,24 +2146,23 @@ Handle<Object> Debugger::MakeExceptionEvent(Handle<Object> exec_state,
bool* caught_exception) {
Factory* factory = isolate_->factory();
// Create the new exception event object.
Handle<Object> argv[] = { exec_state,
exception,
factory->ToBoolean(uncaught) };
const int argc = 3;
Object** argv[argc] = { exec_state.location(),
exception.location(),
uncaught ? factory->true_value().location() :
factory->false_value().location()};
return MakeJSObject(CStrVector("MakeExceptionEvent"),
ARRAY_SIZE(argv),
argv,
caught_exception);
argc, argv, caught_exception);
}
Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
bool* caught_exception) {
// Create the new function event object.
Handle<Object> argv[] = { function };
const int argc = 1;
Object** argv[argc] = { function.location() };
return MakeJSObject(CStrVector("MakeNewFunctionEvent"),
ARRAY_SIZE(argv),
argv,
caught_exception);
argc, argv, caught_exception);
}
@ -2184,11 +2173,14 @@ Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
// Create the compile event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> script_wrapper = GetScriptWrapper(script);
Handle<Object> argv[] = { exec_state,
script_wrapper,
factory->ToBoolean(before) };
const int argc = 3;
Object** argv[argc] = { exec_state.location(),
script_wrapper.location(),
before ? factory->true_value().location() :
factory->false_value().location() };
return MakeJSObject(CStrVector("MakeCompileEvent"),
ARRAY_SIZE(argv),
argc,
argv,
caught_exception);
}
@ -2199,10 +2191,11 @@ Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
// Create the script collected event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
Handle<Object> argv[] = { exec_state, id_object };
const int argc = 2;
Object** argv[argc] = { exec_state.location(), id_object.location() };
return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
ARRAY_SIZE(argv),
argc,
argv,
caught_exception);
}
@ -2352,12 +2345,11 @@ void Debugger::OnAfterCompile(Handle<Script> script,
Handle<JSValue> wrapper = GetScriptWrapper(script);
// Call UpdateScriptBreakPoints expect no exceptions.
bool caught_exception;
Handle<Object> argv[] = { wrapper };
bool caught_exception = false;
const int argc = 1;
Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
Isolate::Current()->js_builtins_object(),
ARRAY_SIZE(argv),
argv,
Isolate::Current()->js_builtins_object(), argc, argv,
&caught_exception);
if (caught_exception) {
return;
@ -2489,16 +2481,13 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
// Invoke the JavaScript debug event listener.
Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event)),
exec_state,
event_data,
event_listener_data_ };
bool caught_exception;
Execution::TryCall(fun,
isolate_->global(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
const int argc = 4;
Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
exec_state.location(),
Handle<Object>::cast(event_data).location(),
event_listener_data_.location() };
bool caught_exception = false;
Execution::TryCall(fun, isolate_->global(), argc, argv, &caught_exception);
// Silently ignore exceptions from debug event listeners.
}
@ -2867,11 +2856,12 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
return isolate_->factory()->undefined_value();
}
Handle<Object> argv[] = { exec_state, data };
static const int kArgc = 2;
Object** argv[kArgc] = { exec_state.location(), data.location() };
Handle<Object> result = Execution::Call(
fun,
Handle<Object>(isolate_->debug()->debug_context_->global_proxy()),
ARRAY_SIZE(argv),
kArgc,
argv,
pending_exception);
return result;
@ -2939,94 +2929,6 @@ void Debugger::CallMessageDispatchHandler() {
}
EnterDebugger::EnterDebugger()
: isolate_(Isolate::Current()),
prev_(isolate_->debug()->debugger_entry()),
it_(isolate_),
has_js_frames_(!it_.done()),
save_(isolate_) {
Debug* debug = isolate_->debug();
ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
// Link recursive debugger entry.
debug->set_debugger_entry(this);
// Store the previous break id and frame id.
break_id_ = debug->break_id();
break_frame_id_ = debug->break_frame_id();
// Create the new break info. If there are no JavaScript frames there is no
// break frame id.
if (has_js_frames_) {
debug->NewBreak(it_.frame()->id());
} else {
debug->NewBreak(StackFrame::NO_ID);
}
// Make sure that debugger is loaded and enter the debugger context.
load_failed_ = !debug->Load();
if (!load_failed_) {
// NOTE the member variable save which saves the previous context before
// this change.
isolate_->set_context(*debug->debug_context());
}
}
EnterDebugger::~EnterDebugger() {
ASSERT(Isolate::Current() == isolate_);
Debug* debug = isolate_->debug();
// Restore to the previous break state.
debug->SetBreak(break_frame_id_, break_id_);
// Check for leaving the debugger.
if (prev_ == NULL) {
// Clear mirror cache when leaving the debugger. Skip this if there is a
// pending exception as clearing the mirror cache calls back into
// JavaScript. This can happen if the v8::Debug::Call is used in which
// case the exception should end up in the calling code.
if (!isolate_->has_pending_exception()) {
// Try to avoid any pending debug break breaking in the clear mirror
// cache JavaScript code.
if (isolate_->stack_guard()->IsDebugBreak()) {
debug->set_interrupts_pending(DEBUGBREAK);
isolate_->stack_guard()->Continue(DEBUGBREAK);
}
debug->ClearMirrorCache();
}
// Request preemption and debug break when leaving the last debugger entry
// if any of these were recorded while debugging.
if (debug->is_interrupt_pending(PREEMPT)) {
// This re-scheduling of preemption is to avoid starvation in some
// debugging scenarios.
debug->clear_interrupt_pending(PREEMPT);
isolate_->stack_guard()->Preempt();
}
if (debug->is_interrupt_pending(DEBUGBREAK)) {
debug->clear_interrupt_pending(DEBUGBREAK);
isolate_->stack_guard()->DebugBreak();
}
// If there are commands in the queue when leaving the debugger request
// that these commands are processed.
if (isolate_->debugger()->HasCommands()) {
isolate_->stack_guard()->DebugCommand();
}
// If leaving the debugger with the debugger no longer active, unload it.
if (!isolate_->debugger()->IsDebuggerActive()) {
isolate_->debugger()->UnloadDebugger();
}
}
// Leaving this debugger entry.
debug->set_debugger_entry(prev_);
}
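For reference, the EnterDebugger scope above (moved into the header in this downgrade) is a plain RAII pattern: the constructor links a possibly recursive debugger entry and records the previous break state, and the destructor restores it when the scope unwinds. The following is an illustrative standalone sketch of that shape only; it is not V8 code, and every name in it is invented.

// RAII sketch (not V8 code): save global "debugger entry" state on
// construction, restore it on destruction, as EnterDebugger does above.
#include <cassert>

struct DebugState {
  void* current_entry = nullptr;   // stands in for debug->debugger_entry()
  int break_id = 0;                // stands in for the current break id
};

static DebugState g_debug;

class ScopedDebuggerEntry {
 public:
  ScopedDebuggerEntry()
      : prev_entry_(g_debug.current_entry), prev_break_id_(g_debug.break_id) {
    g_debug.current_entry = this;            // link this (recursive) entry
    g_debug.break_id = prev_break_id_ + 1;   // model "create new break info"
  }
  ~ScopedDebuggerEntry() {
    g_debug.break_id = prev_break_id_;       // restore previous break state
    g_debug.current_entry = prev_entry_;     // unlink, back to the outer entry
  }
 private:
  void* prev_entry_;
  int prev_break_id_;
};

int main() {
  { ScopedDebuggerEntry outer;
    { ScopedDebuggerEntry inner; }           // nested entry unwinds first
  }
  assert(g_debug.current_entry == nullptr && g_debug.break_id == 0);
}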
MessageImpl MessageImpl::NewEvent(DebugEvent event,
bool running,
Handle<JSObject> exec_state,

90  deps/v8/src/debug.h

@ -705,8 +705,7 @@ class Debugger {
void DebugRequest(const uint16_t* json_request, int length);
Handle<Object> MakeJSObject(Vector<const char> constructor_name,
int argc,
Handle<Object> argv[],
int argc, Object*** argv,
bool* caught_exception);
Handle<Object> MakeExecutionState(bool* caught_exception);
Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
@ -870,8 +869,91 @@ class Debugger {
// some reason could not be entered FailedToEnter will return true.
class EnterDebugger BASE_EMBEDDED {
public:
EnterDebugger();
~EnterDebugger();
EnterDebugger()
: isolate_(Isolate::Current()),
prev_(isolate_->debug()->debugger_entry()),
it_(isolate_),
has_js_frames_(!it_.done()),
save_(isolate_) {
Debug* debug = isolate_->debug();
ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
// Link recursive debugger entry.
debug->set_debugger_entry(this);
// Store the previous break id and frame id.
break_id_ = debug->break_id();
break_frame_id_ = debug->break_frame_id();
// Create the new break info. If there are no JavaScript frames there is no
// break frame id.
if (has_js_frames_) {
debug->NewBreak(it_.frame()->id());
} else {
debug->NewBreak(StackFrame::NO_ID);
}
// Make sure that debugger is loaded and enter the debugger context.
load_failed_ = !debug->Load();
if (!load_failed_) {
// NOTE the member variable save which saves the previous context before
// this change.
isolate_->set_context(*debug->debug_context());
}
}
~EnterDebugger() {
ASSERT(Isolate::Current() == isolate_);
Debug* debug = isolate_->debug();
// Restore to the previous break state.
debug->SetBreak(break_frame_id_, break_id_);
// Check for leaving the debugger.
if (prev_ == NULL) {
// Clear mirror cache when leaving the debugger. Skip this if there is a
// pending exception as clearing the mirror cache calls back into
// JavaScript. This can happen if the v8::Debug::Call is used in which
// case the exception should end up in the calling code.
if (!isolate_->has_pending_exception()) {
// Try to avoid any pending debug break breaking in the clear mirror
// cache JavaScript code.
if (isolate_->stack_guard()->IsDebugBreak()) {
debug->set_interrupts_pending(DEBUGBREAK);
isolate_->stack_guard()->Continue(DEBUGBREAK);
}
debug->ClearMirrorCache();
}
// Request preemption and debug break when leaving the last debugger entry
// if any of these were recorded while debugging.
if (debug->is_interrupt_pending(PREEMPT)) {
// This re-scheduling of preemption is to avoid starvation in some
// debugging scenarios.
debug->clear_interrupt_pending(PREEMPT);
isolate_->stack_guard()->Preempt();
}
if (debug->is_interrupt_pending(DEBUGBREAK)) {
debug->clear_interrupt_pending(DEBUGBREAK);
isolate_->stack_guard()->DebugBreak();
}
// If there are commands in the queue when leaving the debugger request
// that these commands are processed.
if (isolate_->debugger()->HasCommands()) {
isolate_->stack_guard()->DebugCommand();
}
// If leaving the debugger with the debugger no longer active, unload it.
if (!isolate_->debugger()->IsDebuggerActive()) {
isolate_->debugger()->UnloadDebugger();
}
}
// Leaving this debugger entry.
debug->set_debugger_entry(prev_);
}
// Check whether the debugger could be entered.
inline bool FailedToEnter() { return load_failed_; }

70  deps/v8/src/deoptimizer.cc

@ -52,13 +52,11 @@ DeoptimizerData::DeoptimizerData() {
DeoptimizerData::~DeoptimizerData() {
if (eager_deoptimization_entry_code_ != NULL) {
Isolate::Current()->memory_allocator()->Free(
eager_deoptimization_entry_code_);
eager_deoptimization_entry_code_->Free(EXECUTABLE);
eager_deoptimization_entry_code_ = NULL;
}
if (lazy_deoptimization_entry_code_ != NULL) {
Isolate::Current()->memory_allocator()->Free(
lazy_deoptimization_entry_code_);
lazy_deoptimization_entry_code_->Free(EXECUTABLE);
lazy_deoptimization_entry_code_ = NULL;
}
}
@ -73,8 +71,6 @@ void DeoptimizerData::Iterate(ObjectVisitor* v) {
#endif
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(JSFunction* function,
BailoutType type,
unsigned bailout_id,
@ -323,8 +319,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
input_(NULL),
output_count_(0),
output_(NULL),
frame_alignment_marker_(isolate->heap()->frame_alignment_marker()),
has_alignment_padding_(0),
deferred_heap_numbers_(0) {
if (FLAG_trace_deopt && type != OSR) {
if (type == DEBUGGER) {
@ -349,26 +343,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
if (type == EAGER) {
ASSERT(from == NULL);
optimized_code_ = function_->code();
if (FLAG_trace_deopt && FLAG_code_comments) {
// Print instruction associated with this bailout.
const char* last_comment = NULL;
int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
| RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
if (info->rmode() == RelocInfo::COMMENT) {
last_comment = reinterpret_cast<const char*>(info->data());
}
if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
unsigned id = Deoptimizer::GetDeoptimizationId(
info->target_address(), Deoptimizer::EAGER);
if (id == bailout_id && last_comment != NULL) {
PrintF(" %s\n", last_comment);
break;
}
}
}
}
} else if (type == LAZY) {
optimized_code_ = FindDeoptimizingCodeFromAddress(from);
ASSERT(optimized_code_ != NULL);
@ -412,7 +386,7 @@ void Deoptimizer::DeleteFrameDescriptions() {
Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
ASSERT(id >= 0);
if (id >= kNumberOfEntries) return NULL;
MemoryChunk* base = NULL;
LargeObjectChunk* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
if (data->eager_deoptimization_entry_code_ == NULL) {
@ -426,12 +400,12 @@ Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
base = data->lazy_deoptimization_entry_code_;
}
return
static_cast<Address>(base->body()) + (id * table_entry_size_);
static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
}
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
MemoryChunk* base = NULL;
LargeObjectChunk* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
base = data->eager_deoptimization_entry_code_;
@ -439,14 +413,14 @@ int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
base = data->lazy_deoptimization_entry_code_;
}
if (base == NULL ||
addr < base->body() ||
addr >= base->body() +
addr < base->GetStartAddress() ||
addr >= base->GetStartAddress() +
(kNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
ASSERT_EQ(0,
static_cast<int>(addr - base->body()) % table_entry_size_);
return static_cast<int>(addr - base->body()) / table_entry_size_;
static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_;
}
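Both directions of the deoptimization entry table lookup above are fixed-stride pointer arithmetic: an entry's address is the table base plus id * table_entry_size_, and the id is recovered as (addr - base) / table_entry_size_ after a range and divisibility check. A standalone sketch of that arithmetic with assumed sizes follows; it is not V8 code.

// Standalone sketch of the deopt-entry table arithmetic (assumed sizes).
#include <cassert>
#include <cstdint>

constexpr int kNumberOfEntries = 4096;
constexpr int kTableEntrySize = 10;   // assumed per-entry byte count

uint8_t* EntryAddress(uint8_t* base, int id) {
  assert(id >= 0 && id < kNumberOfEntries);
  return base + id * kTableEntrySize;
}

int EntryId(uint8_t* base, uint8_t* addr) {
  if (addr < base || addr >= base + kNumberOfEntries * kTableEntrySize)
    return -1;                        // analogue of kNotDeoptimizationEntry
  assert((addr - base) % kTableEntrySize == 0);
  return static_cast<int>((addr - base) / kTableEntrySize);
}

int main() {
  static uint8_t table[kNumberOfEntries * kTableEntrySize];
  uint8_t* a = EntryAddress(table, 17);
  assert(EntryId(table, a) == 17);    // the two mappings are inverses
}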
@ -488,8 +462,6 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
}
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
if (bailout_type_ == OSR) {
DoComputeOsrOutputFrame();
@ -641,13 +613,11 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
intptr_t input_value = input_->GetRegister(input_reg);
if (FLAG_trace_deopt) {
PrintF(
" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
input_value,
converter.NameOfCPURegister(input_reg));
reinterpret_cast<Object*>(input_value)->ShortPrint();
PrintF("\n");
}
output_[frame_index]->SetFrameSlot(output_offset, input_value);
return;
@ -705,12 +675,10 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
output_offset,
input_value,
input_offset);
reinterpret_cast<Object*>(input_value)->ShortPrint();
PrintF("\n");
}
output_[frame_index]->SetFrameSlot(output_offset, input_value);
return;
@ -985,10 +953,7 @@ void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
for (uint32_t i = 0; i < table_length; ++i) {
uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
PatchStackCheckCodeAt(unoptimized_code,
pc_after,
check_code,
replacement_code);
PatchStackCheckCodeAt(pc_after, check_code, replacement_code);
stack_check_cursor += 2 * kIntSize;
}
}
@ -1074,7 +1039,7 @@ void Deoptimizer::AddDoubleValue(intptr_t slot_address,
}
MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
// We cannot run this if the serializer is enabled because this will
// cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section
@ -1088,15 +1053,12 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0);
MemoryChunk* chunk =
Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
EXECUTABLE,
NULL);
LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
if (chunk == NULL) {
V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
}
memcpy(chunk->body(), desc.buffer, desc.instr_size);
CPU::FlushICache(chunk->body(), desc.instr_size);
memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
return chunk;
}

18  deps/v8/src/deoptimizer.h

@ -86,8 +86,8 @@ class DeoptimizerData {
#endif
private:
MemoryChunk* eager_deoptimization_entry_code_;
MemoryChunk* lazy_deoptimization_entry_code_;
LargeObjectChunk* eager_deoptimization_entry_code_;
LargeObjectChunk* lazy_deoptimization_entry_code_;
Deoptimizer* current_;
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -173,8 +173,7 @@ class Deoptimizer : public Malloced {
// Patch stack guard check at instruction before pc_after in
// the unoptimized code to unconditionally call replacement_code.
static void PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
static void PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code);
@ -212,11 +211,6 @@ class Deoptimizer : public Malloced {
return OFFSET_OF(Deoptimizer, output_count_);
}
static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
static int frame_alignment_marker_offset() {
return OFFSET_OF(Deoptimizer, frame_alignment_marker_); }
static int has_alignment_padding_offset() {
return OFFSET_OF(Deoptimizer, has_alignment_padding_);
}
static int GetDeoptimizedCodeCount(Isolate* isolate);
@ -291,7 +285,7 @@ class Deoptimizer : public Malloced {
void AddDoubleValue(intptr_t slot_address, double value);
static MemoryChunk* CreateCode(BailoutType type);
static LargeObjectChunk* CreateCode(BailoutType type);
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
@ -321,10 +315,6 @@ class Deoptimizer : public Malloced {
// Array of output frame descriptions.
FrameDescription** output_;
// Frames can be dynamically padded on ia32 to align untagged doubles.
Object* frame_alignment_marker_;
intptr_t has_alignment_padding_;
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
static const int table_entry_size_;

2  deps/v8/src/disassembler.cc

@ -200,7 +200,7 @@ static int DecodeIt(FILE* f,
// Print all the reloc info for this instruction which are not comments.
for (int i = 0; i < pcs.length(); i++) {
// Put together the reloc info
RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], NULL);
RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
// Indent the printing of the reloc info.
if (i == 0) {

11  deps/v8/src/elements.cc

@ -227,9 +227,7 @@ class FastElementsAccessor
public:
static MaybeObject* DeleteCommon(JSObject* obj,
uint32_t key) {
ASSERT(obj->HasFastElements() ||
obj->HasFastSmiOnlyElements() ||
obj->HasFastArgumentsElements());
ASSERT(obj->HasFastElements() || obj->HasFastArgumentsElements());
Heap* heap = obj->GetHeap();
FixedArray* backing_store = FixedArray::cast(obj->elements());
if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
@ -598,9 +596,6 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
void ElementsAccessor::InitializeOncePerProcess() {
static struct ConcreteElementsAccessors {
// Use the fast element handler for smi-only arrays. The implementation is
// currently identical.
FastElementsAccessor fast_smi_elements_handler;
FastElementsAccessor fast_elements_handler;
FastDoubleElementsAccessor fast_double_elements_handler;
DictionaryElementsAccessor dictionary_elements_handler;
@ -617,7 +612,6 @@ void ElementsAccessor::InitializeOncePerProcess() {
} element_accessors;
static ElementsAccessor* accessor_array[] = {
&element_accessors.fast_smi_elements_handler,
&element_accessors.fast_elements_handler,
&element_accessors.fast_double_elements_handler,
&element_accessors.dictionary_elements_handler,
@ -633,9 +627,6 @@ void ElementsAccessor::InitializeOncePerProcess() {
&element_accessors.pixel_elements_handler
};
STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
kElementsKindCount);
elements_accessors_ = accessor_array;
}

153  deps/v8/src/execution.cc

@ -33,7 +33,6 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
#include "isolate-inl.h"
#include "runtime-profiler.h"
#include "simulator.h"
#include "v8threads.h"
@ -66,13 +65,13 @@ void StackGuard::reset_limits(const ExecutionAccess& lock) {
}
static Handle<Object> Invoke(bool is_construct,
Handle<JSFunction> function,
static Handle<Object> Invoke(bool construct,
Handle<JSFunction> func,
Handle<Object> receiver,
int argc,
Handle<Object> args[],
Object*** args,
bool* has_pending_exception) {
Isolate* isolate = function->GetIsolate();
Isolate* isolate = func->GetIsolate();
// Entering JavaScript.
VMState state(isolate, JS);
@ -80,15 +79,21 @@ static Handle<Object> Invoke(bool is_construct,
// Placeholder for return value.
MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
typedef Object* (*JSEntryFunction)(byte* entry,
typedef Object* (*JSEntryFunction)(
byte* entry,
Object* function,
Object* receiver,
int argc,
Object*** args);
Handle<Code> code = is_construct
? isolate->factory()->js_construct_entry_code()
: isolate->factory()->js_entry_code();
Handle<Code> code;
if (construct) {
JSConstructEntryStub stub;
code = stub.GetCode();
} else {
JSEntryStub stub;
code = stub.GetCode();
}
// Convert calls on global objects to be calls on the global
// receiver instead to avoid having a 'this' pointer which refers
@ -100,22 +105,21 @@ static Handle<Object> Invoke(bool is_construct,
// Make sure that the global object of the context we're about to
// make the current one is indeed a global object.
ASSERT(function->context()->global()->IsGlobalObject());
ASSERT(func->context()->global()->IsGlobalObject());
{
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
SaveContext save(isolate);
NoHandleAllocation na;
JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
// Call the function through the right JS entry stub.
byte* function_entry = function->code()->entry();
JSFunction* func = *function;
Object* recv = *receiver;
Object*** argv = reinterpret_cast<Object***>(args);
value =
CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
byte* entry_address = func->code()->entry();
JSFunction* function = *func;
Object* receiver_pointer = *receiver;
value = CALL_GENERATED_CODE(entry, entry_address, function,
receiver_pointer, argc, args);
}
#ifdef DEBUG
@ -144,11 +148,9 @@ static Handle<Object> Invoke(bool is_construct,
Handle<Object> Execution::Call(Handle<Object> callable,
Handle<Object> receiver,
int argc,
Handle<Object> argv[],
Object*** args,
bool* pending_exception,
bool convert_receiver) {
*pending_exception = false;
if (!callable->IsJSFunction()) {
callable = TryGetFunctionDelegate(callable, pending_exception);
if (*pending_exception) return callable;
@ -170,15 +172,13 @@ Handle<Object> Execution::Call(Handle<Object> callable,
if (*pending_exception) return callable;
}
return Invoke(false, func, receiver, argc, argv, pending_exception);
return Invoke(false, func, receiver, argc, args, pending_exception);
}
Handle<Object> Execution::New(Handle<JSFunction> func,
int argc,
Handle<Object> argv[],
bool* pending_exception) {
return Invoke(true, func, Isolate::Current()->global(), argc, argv,
Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
Object*** args, bool* pending_exception) {
return Invoke(true, func, Isolate::Current()->global(), argc, args,
pending_exception);
}
@ -186,7 +186,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func,
Handle<Object> Execution::TryCall(Handle<JSFunction> func,
Handle<Object> receiver,
int argc,
Handle<Object> args[],
Object*** args,
bool* caught_exception) {
// Enter a try-block while executing the JavaScript code. To avoid
// duplicate error printing it must be non-verbose. Also, to avoid
@ -195,7 +195,6 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
v8::TryCatch catcher;
catcher.SetVerbose(false);
catcher.SetCaptureMessage(false);
*caught_exception = false;
Handle<Object> result = Invoke(false, func, receiver, argc, args,
caught_exception);
@ -378,7 +377,7 @@ void StackGuard::DisableInterrupts() {
bool StackGuard::IsInterrupted() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & INTERRUPT) != 0;
return thread_local_.interrupt_flags_ & INTERRUPT;
}
@ -404,7 +403,7 @@ void StackGuard::Preempt() {
bool StackGuard::IsTerminateExecution() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & TERMINATE) != 0;
return thread_local_.interrupt_flags_ & TERMINATE;
}
@ -417,7 +416,7 @@ void StackGuard::TerminateExecution() {
bool StackGuard::IsRuntimeProfilerTick() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
}
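The interrupt queries above all test one bit of a shared flags word; the two variants in each hunk differ only in whether the masked value is explicitly compared against zero before conversion to bool, so both return the same truth value. A minimal standalone sketch of the bit-flag pattern, reusing the flag values shown in the execution.h hunk further below (everything else here is invented, not V8 code):

#include <cassert>

enum InterruptFlag {                 // values as listed in execution.h
  DEBUGCOMMAND = 1 << 2,
  PREEMPT = 1 << 3,
  TERMINATE = 1 << 4,
  RUNTIME_PROFILER_TICK = 1 << 5
};

struct ThreadLocal { int interrupt_flags_ = 0; };

bool IsSet(const ThreadLocal& t, InterruptFlag f) {
  return (t.interrupt_flags_ & f) != 0;   // same truth value as the variant
}                                         // without the explicit "!= 0"

int main() {
  ThreadLocal t;
  t.interrupt_flags_ |= RUNTIME_PROFILER_TICK;   // request a profiler tick
  assert(IsSet(t, RUNTIME_PROFILER_TICK));
  assert(!IsSet(t, PREEMPT));
  t.interrupt_flags_ &= ~RUNTIME_PROFILER_TICK;  // Continue(flag) analogue
  assert(!IsSet(t, RUNTIME_PROFILER_TICK));
}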
@ -434,22 +433,6 @@ void StackGuard::RequestRuntimeProfilerTick() {
}
bool StackGuard::IsGCRequest() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
}
void StackGuard::RequestGC() {
ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= GC_REQUEST;
if (thread_local_.postpone_interrupts_nesting_ == 0) {
thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
isolate_->heap()->SetStackLimits();
}
}
#ifdef ENABLE_DEBUGGER_SUPPORT
bool StackGuard::IsDebugBreak() {
ExecutionAccess access(isolate_);
@ -572,14 +555,13 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
// --- C a l l s t o n a t i v e s ---
#define RETURN_NATIVE_CALL(name, args, has_pending_exception) \
#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \
do { \
Isolate* isolate = Isolate::Current(); \
Handle<Object> argv[] = args; \
Object** args[argc] = argv; \
ASSERT(has_pending_exception != NULL); \
return Call(isolate->name##_fun(), \
isolate->js_builtins_object(), \
ARRAY_SIZE(argv), argv, \
isolate->js_builtins_object(), argc, args, \
has_pending_exception); \
} while (false)
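The macro above, like the other call sites changed in this diff, reverts to the older calling convention in which the caller collects the locations of its handles into an Object** array and passes it together with an explicit argc. The following is a standalone model of that convention with stand-in types; it is not V8 code, and Object, Handle, and FirstArgument are all invented here.

#include <cassert>

struct Object { int value; };        // stand-in for v8::internal::Object

template <typename T>
struct Handle {                      // minimal stand-in for v8's Handle<T>
  T* slot;
  T** location() { return &slot; }   // address of the slot holding T*
};

// Callee in the reverted convention: explicit argc plus an array of
// argument "locations", each an Object**.
Object* FirstArgument(int argc, Object*** argv) {
  assert(argc >= 1);
  return *argv[0];                   // dereference the first argument slot
}

int main() {
  Object forty_two{42};
  Handle<Object> obj{&forty_two};
  Object** argv[1] = { obj.location() };   // mirrors the diff's call sites
  assert(FirstArgument(1, argv) == &forty_two);
}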
@ -601,44 +583,44 @@ Handle<Object> Execution::ToBoolean(Handle<Object> obj) {
Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_number, { obj }, exc);
RETURN_NATIVE_CALL(to_number, 1, { obj.location() }, exc);
}
Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_string, { obj }, exc);
RETURN_NATIVE_CALL(to_string, 1, { obj.location() }, exc);
}
Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_detail_string, { obj }, exc);
RETURN_NATIVE_CALL(to_detail_string, 1, { obj.location() }, exc);
}
Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
if (obj->IsSpecObject()) return obj;
RETURN_NATIVE_CALL(to_object, { obj }, exc);
RETURN_NATIVE_CALL(to_object, 1, { obj.location() }, exc);
}
Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_integer, { obj }, exc);
RETURN_NATIVE_CALL(to_integer, 1, { obj.location() }, exc);
}
Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_uint32, { obj }, exc);
RETURN_NATIVE_CALL(to_uint32, 1, { obj.location() }, exc);
}
Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
RETURN_NATIVE_CALL(to_int32, { obj }, exc);
RETURN_NATIVE_CALL(to_int32, 1, { obj.location() }, exc);
}
Handle<Object> Execution::NewDate(double time, bool* exc) {
Handle<Object> time_obj = FACTORY->NewNumber(time);
RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
RETURN_NATIVE_CALL(create_date, 1, { time_obj.location() }, exc);
}
@ -675,7 +657,7 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
bool caught_exception;
Handle<Object> index_object = factory->NewNumberFromInt(int_index);
Handle<Object> index_arg[] = { index_object };
Object** index_arg[] = { index_object.location() };
Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
string,
ARRAY_SIZE(index_arg),
@ -689,8 +671,7 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
Handle<JSFunction> Execution::InstantiateFunction(
Handle<FunctionTemplateInfo> data,
bool* exc) {
Handle<FunctionTemplateInfo> data, bool* exc) {
Isolate* isolate = data->GetIsolate();
// Fast case: see if the function has already been instantiated
int serial_number = Smi::cast(data->serial_number())->value();
@ -699,12 +680,10 @@ Handle<JSFunction> Execution::InstantiateFunction(
GetElementNoExceptionThrown(serial_number);
if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
// The function has not yet been instantiated in this context; do it.
Handle<Object> args[] = { data };
Handle<Object> result = Call(isolate->instantiate_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
exc);
Object** args[1] = { Handle<Object>::cast(data).location() };
Handle<Object> result =
Call(isolate->instantiate_fun(),
isolate->js_builtins_object(), 1, args, exc);
if (*exc) return Handle<JSFunction>::null();
return Handle<JSFunction>::cast(result);
}
@ -731,12 +710,10 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
ASSERT(!*exc);
return Handle<JSObject>(JSObject::cast(result));
} else {
Handle<Object> args[] = { data };
Handle<Object> result = Call(isolate->instantiate_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
exc);
Object** args[1] = { Handle<Object>::cast(data).location() };
Handle<Object> result =
Call(isolate->instantiate_fun(),
isolate->js_builtins_object(), 1, args, exc);
if (*exc) return Handle<JSObject>::null();
return Handle<JSObject>::cast(result);
}
@ -747,12 +724,9 @@ void Execution::ConfigureInstance(Handle<Object> instance,
Handle<Object> instance_template,
bool* exc) {
Isolate* isolate = Isolate::Current();
Handle<Object> args[] = { instance, instance_template };
Object** args[2] = { instance.location(), instance_template.location() };
Execution::Call(isolate->configure_instance_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
exc);
isolate->js_builtins_object(), 2, args, exc);
}
@ -761,12 +735,15 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
Handle<Object> pos,
Handle<Object> is_global) {
Isolate* isolate = fun->GetIsolate();
Handle<Object> args[] = { recv, fun, pos, is_global };
bool caught_exception;
Handle<Object> result = TryCall(isolate->get_stack_trace_line_fun(),
isolate->js_builtins_object(),
ARRAY_SIZE(args),
args,
const int argc = 4;
Object** args[argc] = { recv.location(),
Handle<Object>::cast(fun).location(),
pos.location(),
is_global.location() };
bool caught_exception = false;
Handle<Object> result =
TryCall(isolate->get_stack_trace_line_fun(),
isolate->js_builtins_object(), argc, args,
&caught_exception);
if (caught_exception || !result->IsString()) {
return isolate->factory()->empty_symbol();
@ -875,12 +852,6 @@ void Execution::ProcessDebugMesssages(bool debug_command_only) {
MaybeObject* Execution::HandleStackGuardInterrupt() {
Isolate* isolate = Isolate::Current();
StackGuard* stack_guard = isolate->stack_guard();
if (stack_guard->IsGCRequest()) {
isolate->heap()->CollectAllGarbage(false);
stack_guard->Continue(GC_REQUEST);
}
isolate->counters()->stack_interrupts()->Increment();
if (stack_guard->IsRuntimeProfilerTick()) {
isolate->counters()->runtime_profiler_ticks()->Increment();

13  deps/v8/src/execution.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -41,8 +41,7 @@ enum InterruptFlag {
DEBUGCOMMAND = 1 << 2,
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
RUNTIME_PROFILER_TICK = 1 << 5,
GC_REQUEST = 1 << 6
RUNTIME_PROFILER_TICK = 1 << 5
};
class Execution : public AllStatic {
@ -61,7 +60,7 @@ class Execution : public AllStatic {
static Handle<Object> Call(Handle<Object> callable,
Handle<Object> receiver,
int argc,
Handle<Object> argv[],
Object*** args,
bool* pending_exception,
bool convert_receiver = false);
@ -74,7 +73,7 @@ class Execution : public AllStatic {
//
static Handle<Object> New(Handle<JSFunction> func,
int argc,
Handle<Object> argv[],
Object*** args,
bool* pending_exception);
// Call a function, just like Call(), but make sure to silently catch
@ -84,7 +83,7 @@ class Execution : public AllStatic {
static Handle<Object> TryCall(Handle<JSFunction> func,
Handle<Object> receiver,
int argc,
Handle<Object> argv[],
Object*** args,
bool* caught_exception);
// ECMA-262 9.2
@ -197,8 +196,6 @@ class StackGuard {
bool IsDebugCommand();
void DebugCommand();
#endif
bool IsGCRequest();
void RequestGC();
void Continue(InterruptFlag after_what);
// This provides an asynchronous read of the stack limits for the current

7  deps/v8/src/extensions/gc-extension.cc

@ -40,7 +40,12 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
bool compact = false;
// All allocation spaces other than NEW_SPACE have the same effect.
if (args.Length() >= 1 && args[0]->IsBoolean()) {
compact = args[0]->BooleanValue();
}
HEAP->CollectAllGarbage(compact);
return v8::Undefined();
}

96  deps/v8/src/factory.cc

@ -234,7 +234,7 @@ Handle<String> Factory::NewProperSubString(Handle<String> str,
Handle<String> Factory::NewExternalStringFromAscii(
const ExternalAsciiString::Resource* resource) {
ExternalAsciiString::Resource* resource) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateExternalStringFromAscii(resource),
@ -243,7 +243,7 @@ Handle<String> Factory::NewExternalStringFromAscii(
Handle<String> Factory::NewExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource) {
ExternalTwoByteString::Resource* resource) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateExternalStringFromTwoByte(resource),
@ -404,12 +404,10 @@ Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
}
Handle<Map> Factory::NewMap(InstanceType type,
int instance_size,
ElementsKind elements_kind) {
Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateMap(type, instance_size, elements_kind),
isolate()->heap()->AllocateMap(type, instance_size),
Map);
}
@ -457,11 +455,23 @@ Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
}
Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
CALL_HEAP_FUNCTION(isolate(), src->GetFastElementsMap(), Map);
}
Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
CALL_HEAP_FUNCTION(isolate(), src->GetSlowElementsMap(), Map);
}
Handle<Map> Factory::GetElementsTransitionMap(
Handle<JSObject> src,
ElementsKind elements_kind) {
Handle<Map> src,
ElementsKind elements_kind,
bool safe_to_add_transition) {
CALL_HEAP_FUNCTION(isolate(),
src->GetElementsTransitionMap(elements_kind),
src->GetElementsTransitionMap(elements_kind,
safe_to_add_transition),
Map);
}
@ -631,16 +641,14 @@ Handle<Object> Factory::NewError(const char* maker,
return undefined_value();
Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
Handle<Object> type_obj = LookupAsciiSymbol(type);
Handle<Object> argv[] = { type_obj, args };
Object** argv[2] = { type_obj.location(),
Handle<Object>::cast(args).location() };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
bool caught_exception;
Handle<Object> result = Execution::TryCall(fun,
isolate()->js_builtins_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
isolate()->js_builtins_object(), 2, argv, &caught_exception);
return result;
}
@ -656,16 +664,13 @@ Handle<Object> Factory::NewError(const char* constructor,
Handle<JSFunction> fun = Handle<JSFunction>(
JSFunction::cast(isolate()->js_builtins_object()->
GetPropertyNoExceptionThrown(*constr)));
Handle<Object> argv[] = { message };
Object** argv[1] = { Handle<Object>::cast(message).location() };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
bool caught_exception;
Handle<Object> result = Execution::TryCall(fun,
isolate()->js_builtins_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
isolate()->js_builtins_object(), 1, argv, &caught_exception);
return result;
}
@ -717,12 +722,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
if (force_initial_map ||
type != JS_OBJECT_TYPE ||
instance_size != JSObject::kHeaderSize) {
ElementsKind default_elements_kind = FLAG_smi_only_arrays
? FAST_SMI_ONLY_ELEMENTS
: FAST_ELEMENTS;
Handle<Map> initial_map = NewMap(type,
instance_size,
default_elements_kind);
Handle<Map> initial_map = NewMap(type, instance_size);
function->set_initial_map(*initial_map);
initial_map->set_constructor(*function);
}
@ -908,26 +908,11 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
Handle<JSArray> result =
Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
pretenure));
SetContent(result, elements);
result->SetContent(*elements);
return result;
}
void Factory::SetContent(Handle<JSArray> array,
Handle<FixedArray> elements) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
array->SetContent(*elements));
}
void Factory::EnsureCanContainNonSmiElements(Handle<JSArray> array) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
array->EnsureCanContainNonSmiElements());
}
Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
Handle<Object> prototype) {
CALL_HEAP_FUNCTION(
@ -953,13 +938,6 @@ void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
}
void Factory::SetIdentityHash(Handle<JSObject> object, Object* hash) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
object->SetIdentityHash(hash, ALLOW_CREATION));
}
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name,
int number_of_literals,
@ -1012,12 +990,6 @@ Handle<String> Factory::NumberToString(Handle<Object> number) {
}
Handle<String> Factory::Uint32ToString(uint32_t value) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->Uint32ToString(value), String);
}
Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
Handle<NumberDictionary> dictionary,
uint32_t key,
@ -1327,20 +1299,4 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
}
Handle<Object> Factory::GlobalConstantFor(Handle<String> name) {
Heap* h = isolate()->heap();
if (name->Equals(h->undefined_symbol())) return undefined_value();
if (name->Equals(h->nan_symbol())) return nan_value();
if (name->Equals(h->infinity_symbol())) return infinity_value();
return Handle<Object>::null();
}
Handle<Object> Factory::ToBoolean(bool value) {
return Handle<Object>(value
? isolate()->heap()->true_value()
: isolate()->heap()->false_value());
}
} } // namespace v8::internal

32  deps/v8/src/factory.h

@ -145,9 +145,9 @@ class Factory {
// not make sense to have a UTF-8 factory function for external strings,
// because we cannot change the underlying buffer.
Handle<String> NewExternalStringFromAscii(
const ExternalAsciiString::Resource* resource);
ExternalAsciiString::Resource* resource);
Handle<String> NewExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource);
ExternalTwoByteString::Resource* resource);
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewGlobalContext();
@ -203,9 +203,7 @@ class Factory {
Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
Handle<Object> value);
Handle<Map> NewMap(InstanceType type,
int instance_size,
ElementsKind elements_kind = FAST_ELEMENTS);
Handle<Map> NewMap(InstanceType type, int instance_size);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@ -217,8 +215,13 @@ class Factory {
Handle<Map> CopyMapDropTransitions(Handle<Map> map);
Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
ElementsKind elements_kind);
Handle<Map> GetFastElementsMap(Handle<Map> map);
Handle<Map> GetSlowElementsMap(Handle<Map> map);
Handle<Map> GetElementsTransitionMap(Handle<Map> map,
ElementsKind elements_kind,
bool safe_to_add_transition);
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
@ -255,18 +258,12 @@ class Factory {
Handle<FixedArray> elements,
PretenureFlag pretenure = NOT_TENURED);
void SetContent(Handle<JSArray> array, Handle<FixedArray> elements);
void EnsureCanContainNonSmiElements(Handle<JSArray> array);
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
// Change the type of the argument into a JS object/function and reinitialize.
void BecomeJSObject(Handle<JSReceiver> object);
void BecomeJSFunction(Handle<JSReceiver> object);
void SetIdentityHash(Handle<JSObject> object, Object* hash);
Handle<JSFunction> NewFunction(Handle<String> name,
Handle<Object> prototype);
@ -359,7 +356,6 @@ class Factory {
PropertyAttributes attributes);
Handle<String> NumberToString(Handle<Object> number);
Handle<String> Uint32ToString(uint32_t value);
enum ApiInstanceType {
JavaScriptObject,
@ -446,14 +442,6 @@ class Factory {
JSRegExp::Flags flags,
int capture_count);
// Returns the value for a known global constant (a property of the global
// object which is neither configurable nor writable) like 'undefined'.
// Returns a null handle when the given name is unknown.
Handle<Object> GlobalConstantFor(Handle<String> name);
// Converts the given boolean condition to JavaScript boolean value.
Handle<Object> ToBoolean(bool value);
private:
Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }

38  deps/v8/src/flag-definitions.h

@ -98,19 +98,13 @@ private:
// Flags for experimental language features.
DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
DEFINE_bool(harmony_weakmaps, false, "enable harmony weak maps")
DEFINE_bool(harmony, false, "enable all harmony features")
DEFINE_bool(harmony_block_scoping, false, "enable harmony block scoping")
// Flags for experimental implementation features.
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_bool(smi_only_arrays, false, "tracks arrays with only smi values")
DEFINE_bool(string_slices, true, "use string slices")
DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(string_slices, false, "use string slices")
// Flags for Crankshaft.
#ifdef V8_TARGET_ARCH_MIPS
@ -259,16 +253,10 @@ DEFINE_bool(print_cumulative_gc_stat, false,
"print cumulative GC statistics in name=value format on exit")
DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_bool(trace_fragmentation, false,
"report fragmentation for old pointer and data pages")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
DEFINE_bool(flush_code, true,
"flush code that we expect not to use again before full gc")
DEFINE_bool(incremental_marking, true, "use incremental marking")
DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
DEFINE_bool(trace_incremental_marking, false,
"trace progress of the incremental marking")
// v8.cc
DEFINE_bool(use_idle_notification, true,
@ -288,13 +276,8 @@ DEFINE_bool(native_code_counters, false,
// mark-compact.cc
DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
DEFINE_bool(lazy_sweeping, true,
"Use lazy sweeping for old pointer and data spaces")
DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
"Flush code caches in maps during mark compact cycle.")
DEFINE_bool(never_compact, false,
"Never perform compaction on full GC - testing only")
DEFINE_bool(compact_code_space, false, "Compact code space")
DEFINE_bool(cleanup_code_caches_at_gc, true,
"Flush inline caches prior to mark compact collection and "
"flush code caches in maps during mark compact cycle.")
@ -305,6 +288,9 @@ DEFINE_int(random_seed, 0,
DEFINE_bool(canonicalize_object_literal_maps, true,
"Canonicalize maps for object literals.")
DEFINE_bool(use_big_map_space, true,
"Use big map space, but don't compact if it grew too big.")
DEFINE_int(max_map_space_pages, MapSpace::kMaxMapPageIndex - 1,
"Maximum number of pages in map space which still allows to encode "
"forwarding pointers. That's actually a constant, but it's useful "
@ -340,6 +326,7 @@ DEFINE_bool(preemption, false,
// Regexp
DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
DEFINE_bool(regexp_entry_native, true, "use native code to enter regexp")
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
@ -361,15 +348,11 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes",
DEFINE_bool(help, false, "Print usage message, including flags, on console")
DEFINE_bool(dump_counters, false, "Dump counters on exit")
#ifdef ENABLE_DEBUGGER_SUPPORT
DEFINE_bool(debugger, false, "Enable JavaScript debugger")
DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
"debugger agent in another process")
DEFINE_bool(debugger_agent, false, "Enable debugger agent")
DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
#endif // ENABLE_DEBUGGER_SUPPORT
DEFINE_string(map_counters, "", "Map counters to a file")
DEFINE_args(js_arguments, JSArguments(),
"Pass all remaining arguments to the script. Alias for \"--\".")
@ -395,15 +378,6 @@ DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk")
DEFINE_string(gdbjit_dump_filter, "",
"dump only objects containing this substring")
// mark-compact.cc
DEFINE_bool(force_marking_deque_overflows, false,
"force overflows of marking deque by reducing it's size "
"to 64 words")
DEFINE_bool(stress_compaction, false,
"stress the GC compactor to flush out bugs (implies "
"--force_marking_deque_overflows)")
//
// Debug only flags
//

76  deps/v8/src/frames-inl.h

@ -77,21 +77,6 @@ inline StackHandler* StackHandler::FromAddress(Address address) {
}
inline bool StackHandler::is_entry() const {
return state() == ENTRY;
}
inline bool StackHandler::is_try_catch() const {
return state() == TRY_CATCH;
}
inline bool StackHandler::is_try_finally() const {
return state() == TRY_FINALLY;
}
inline StackHandler::State StackHandler::state() const {
const int offset = StackHandlerConstants::kStateOffset;
return static_cast<State>(Memory::int_at(address() + offset));
@ -120,33 +105,8 @@ inline StackHandler* StackFrame::top_handler() const {
}
inline Code* StackFrame::LookupCode() const {
return GetContainingCode(isolate(), pc());
}
inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
return isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
}
inline EntryFrame::EntryFrame(StackFrameIterator* iterator)
: StackFrame(iterator) {
}
inline EntryConstructFrame::EntryConstructFrame(StackFrameIterator* iterator)
: EntryFrame(iterator) {
}
inline ExitFrame::ExitFrame(StackFrameIterator* iterator)
: StackFrame(iterator) {
}
inline StandardFrame::StandardFrame(StackFrameIterator* iterator)
: StackFrame(iterator) {
return isolate->pc_to_code_cache()->GetCacheEntry(pc)->code;
}
@ -195,11 +155,6 @@ inline bool StandardFrame::IsConstructFrame(Address fp) {
}
inline JavaScriptFrame::JavaScriptFrame(StackFrameIterator* iterator)
: StandardFrame(iterator) {
}
Address JavaScriptFrame::GetParameterSlot(int index) const {
int param_count = ComputeParametersCount();
ASSERT(-1 <= index && index < param_count);
@ -235,26 +190,6 @@ inline Object* JavaScriptFrame::function() const {
}
inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
: JavaScriptFrame(iterator) {
}
inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
}
inline InternalFrame::InternalFrame(StackFrameIterator* iterator)
: StandardFrame(iterator) {
}
inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator)
: InternalFrame(iterator) {
}
template<typename Iterator>
inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
Isolate* isolate)
@ -262,15 +197,6 @@ inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
if (!done()) Advance();
}
template<typename Iterator>
inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
Isolate* isolate, ThreadLocalTop* top)
: iterator_(isolate, top) {
if (!done()) Advance();
}
template<typename Iterator>
inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
// TODO(1233797): The frame hierarchy needs to change. It's

117  deps/v8/src/frames.cc

@ -366,17 +366,16 @@ void SafeStackTraceFrameIterator::Advance() {
Code* StackFrame::GetSafepointData(Isolate* isolate,
Address inner_pointer,
Address pc,
SafepointEntry* safepoint_entry,
unsigned* stack_slots) {
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
isolate->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
PcToCodeCache::PcToCodeCacheEntry* entry =
isolate->pc_to_code_cache()->GetCacheEntry(pc);
if (!entry->safepoint_entry.is_valid()) {
entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer);
entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
ASSERT(entry->safepoint_entry.is_valid());
} else {
ASSERT(entry->safepoint_entry.Equals(
entry->code->GetSafepointEntry(inner_pointer)));
ASSERT(entry->safepoint_entry.Equals(entry->code->GetSafepointEntry(pc)));
}
// Fill in the results and return the code.
@ -393,16 +392,11 @@ bool StackFrame::HasHandler() const {
}
#ifdef DEBUG
static bool GcSafeCodeContains(HeapObject* object, Address addr);
#endif
void StackFrame::IteratePc(ObjectVisitor* v,
Address* pc_address,
Code* holder) {
Address pc = *pc_address;
ASSERT(GcSafeCodeContains(holder, pc));
ASSERT(holder->contains(pc));
unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
Object* code = holder;
v->VisitPointer(&code);
@ -825,8 +819,7 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
// back to a slow search in this case to find the original optimized
// code object.
if (!code->contains(pc())) {
code = isolate()->inner_pointer_to_code_cache()->
GcSafeFindCodeForInnerPointer(pc());
code = isolate()->pc_to_code_cache()->GcSafeFindCodeForPc(pc());
}
ASSERT(code != NULL);
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
@ -888,11 +881,6 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
}
int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
return Smi::cast(GetExpression(0))->value();
}
Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
return fp() + StandardFrameConstants::kCallerSPOffset;
}
@ -1167,89 +1155,52 @@ JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
// -------------------------------------------------------------------------
static Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
MapWord map_word = object->map_word();
return map_word.IsForwardingAddress() ?
map_word.ToForwardingAddress()->map() : map_word.ToMap();
}
static int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
}
#ifdef DEBUG
static bool GcSafeCodeContains(HeapObject* code, Address addr) {
Map* map = GcSafeMapOfCodeSpaceObject(code);
ASSERT(map == code->GetHeap()->code_map());
Address start = code->address();
Address end = code->address() + code->SizeFromMap(map);
return start <= addr && addr < end;
}
#endif
Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object,
Address inner_pointer) {
Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
Code* code = reinterpret_cast<Code*>(object);
ASSERT(code != NULL && GcSafeCodeContains(code, inner_pointer));
ASSERT(code != NULL && code->contains(pc));
return code;
}
Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
Address inner_pointer) {
Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
Heap* heap = isolate_->heap();
// Check if the inner pointer points into a large object chunk.
LargePage* large_page = heap->lo_space()->FindPageContainingPc(inner_pointer);
if (large_page != NULL) {
return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
}
// Iterate through the page until we reach the end or find an object starting
// after the inner pointer.
Page* page = Page::FromAddress(inner_pointer);
Address addr = page->skip_list()->StartFor(inner_pointer);
Address top = heap->code_space()->top();
Address limit = heap->code_space()->limit();
// Check if the pc points into a large object chunk.
LargeObjectChunk* chunk = heap->lo_space()->FindChunkContainingPc(pc);
if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
// Iterate through the 8K page until we reach the end or find an
// object starting after the pc.
Page* page = Page::FromAddress(pc);
HeapObjectIterator iterator(page, heap->GcSafeSizeOfOldObjectFunction());
HeapObject* previous = NULL;
while (true) {
if (addr == top && addr != limit) {
addr = limit;
continue;
HeapObject* next = iterator.next();
if (next == NULL || next->address() >= pc) {
return GcSafeCastToCode(previous, pc);
}
HeapObject* obj = HeapObject::FromAddress(addr);
int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
Address next_addr = addr + obj_size;
if (next_addr > inner_pointer) return GcSafeCastToCode(obj, inner_pointer);
addr = next_addr;
previous = next;
}
}
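The lookup above walks the objects of a page from its start, using a size function that tolerates forwarding pointers during GC, until it passes the target address; the object whose extent covers the address is the containing code object. A standalone sketch of that containment scan with a deliberately simplified object layout follows; it is not V8 code and omits the GC-safety details that motivate the real implementation.

#include <cassert>
#include <cstdint>

struct Obj { uint32_t size; };       // assumed: each object starts with its size

// Returns the start of the object containing addr, assuming objects are laid
// out back-to-back in [page_start, page_end) and every size is nonzero.
uint8_t* FindContaining(uint8_t* page_start, uint8_t* page_end, uint8_t* addr) {
  assert(page_start <= addr && addr < page_end);
  uint8_t* cursor = page_start;
  while (cursor < page_end) {
    uint32_t size = reinterpret_cast<Obj*>(cursor)->size;
    if (addr < cursor + size) return cursor;   // addr falls inside this object
    cursor += size;                            // otherwise skip to the next one
  }
  return nullptr;                              // not reached for in-range addr
}

int main() {
  alignas(4) uint8_t page[64] = {};
  reinterpret_cast<Obj*>(page)->size = 16;       // object A occupies [0, 16)
  reinterpret_cast<Obj*>(page + 16)->size = 48;  // object B occupies [16, 64)
  assert(FindContaining(page, page + 64, page + 20) == page + 16);
}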
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
isolate_->counters()->pc_to_code()->Increment();
ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize));
ASSERT(IsPowerOf2(kPcToCodeCacheSize));
uint32_t hash = ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)));
uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
InnerPointerToCodeCacheEntry* entry = cache(index);
if (entry->inner_pointer == inner_pointer) {
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
uint32_t index = hash & (kPcToCodeCacheSize - 1);
PcToCodeCacheEntry* entry = cache(index);
if (entry->pc == pc) {
isolate_->counters()->pc_to_code_cached()->Increment();
ASSERT(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer));
ASSERT(entry->code == GcSafeFindCodeForPc(pc));
} else {
// Because this code may be interrupted by a profiling signal that
// also queries the cache, we cannot update inner_pointer before the code
// has been set. Otherwise, we risk trying to use a cache entry before
// also queries the cache, we cannot update pc before the code has
// been set. Otherwise, we risk trying to use a cache entry before
// the code has been computed.
entry->code = GcSafeFindCodeForInnerPointer(inner_pointer);
entry->code = GcSafeFindCodeForPc(pc);
entry->safepoint_entry.Reset();
entry->inner_pointer = inner_pointer;
entry->pc = pc;
}
return entry;
}
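GetCacheEntry above is a direct-mapped cache: a power-of-two table indexed by hash(pc) & (size - 1), refilled in place on a miss, with the key written only after the payload so a profiling signal that reads the cache concurrently never sees a key paired with stale code. An illustrative standalone sketch of that lookup shape follows; the hash, payload, and SlowLookup are assumptions, not V8 code.

#include <cassert>
#include <cstdint>

constexpr uint32_t kCacheSize = 1024;            // must be a power of two
static_assert((kCacheSize & (kCacheSize - 1)) == 0, "power of two");

struct Entry {
  uintptr_t key = 0;
  int value = 0;
};

Entry cache[kCacheSize];

int SlowLookup(uintptr_t key) { return static_cast<int>(key % 97); }  // stand-in

int Get(uintptr_t key) {
  uint32_t hash = static_cast<uint32_t>(key * 2654435761u);  // assumed hash
  Entry* e = &cache[hash & (kCacheSize - 1)];
  if (e->key == key) return e->value;            // hit: reuse cached payload
  e->value = SlowLookup(key);                    // miss: fill the payload first...
  e->key = key;                                  // ...then publish the key
  return e->value;
}

int main() {
  assert(Get(0x1234) == Get(0x1234));            // second call hits the cache
}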

80  deps/v8/src/frames.h

@ -49,36 +49,36 @@ class StackFrameIterator;
class ThreadLocalTop;
class Isolate;
class InnerPointerToCodeCache {
class PcToCodeCache {
public:
struct InnerPointerToCodeCacheEntry {
Address inner_pointer;
struct PcToCodeCacheEntry {
Address pc;
Code* code;
SafepointEntry safepoint_entry;
};
explicit InnerPointerToCodeCache(Isolate* isolate) : isolate_(isolate) {
explicit PcToCodeCache(Isolate* isolate) : isolate_(isolate) {
Flush();
}
Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
Code* GcSafeCastToCode(HeapObject* object, Address inner_pointer);
Code* GcSafeFindCodeForPc(Address pc);
Code* GcSafeCastToCode(HeapObject* object, Address pc);
void Flush() {
memset(&cache_[0], 0, sizeof(cache_));
}
InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer);
PcToCodeCacheEntry* GetCacheEntry(Address pc);
private:
InnerPointerToCodeCacheEntry* cache(int index) { return &cache_[index]; }
PcToCodeCacheEntry* cache(int index) { return &cache_[index]; }
Isolate* isolate_;
static const int kInnerPointerToCodeCacheSize = 1024;
InnerPointerToCodeCacheEntry cache_[kInnerPointerToCodeCacheSize];
static const int kPcToCodeCacheSize = 1024;
PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
DISALLOW_COPY_AND_ASSIGN(InnerPointerToCodeCache);
DISALLOW_COPY_AND_ASSIGN(PcToCodeCache);
};
@ -106,9 +106,9 @@ class StackHandler BASE_EMBEDDED {
static inline StackHandler* FromAddress(Address address);
// Testers
inline bool is_entry() const;
inline bool is_try_catch() const;
inline bool is_try_finally() const;
bool is_entry() { return state() == ENTRY; }
bool is_try_catch() { return state() == TRY_CATCH; }
bool is_try_finally() { return state() == TRY_FINALLY; }
private:
// Accessors.
@ -139,10 +139,7 @@ class StackFrame BASE_EMBEDDED {
enum Type {
NONE = 0,
STACK_FRAME_TYPE_LIST(DECLARE_TYPE)
NUMBER_OF_TYPES,
// Used by FrameScope to indicate that the stack frame is constructed
// manually and the FrameScope does not need to emit code.
MANUAL
NUMBER_OF_TYPES
};
#undef DECLARE_TYPE
@ -218,7 +215,9 @@ class StackFrame BASE_EMBEDDED {
virtual Code* unchecked_code() const = 0;
// Get the code associated with this frame.
inline Code* LookupCode() const;
Code* LookupCode() const {
return GetContainingCode(isolate(), pc());
}
// Get the code object that contains the given pc.
static inline Code* GetContainingCode(Isolate* isolate, Address pc);
@ -300,7 +299,7 @@ class EntryFrame: public StackFrame {
virtual void SetCallerFp(Address caller_fp);
protected:
inline explicit EntryFrame(StackFrameIterator* iterator);
explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
// The caller stack pointer for entry frames is always zero. The
// real information about the caller frame is available through the
@ -327,7 +326,8 @@ class EntryConstructFrame: public EntryFrame {
}
protected:
inline explicit EntryConstructFrame(StackFrameIterator* iterator);
explicit EntryConstructFrame(StackFrameIterator* iterator)
: EntryFrame(iterator) { }
private:
friend class StackFrameIterator;
@ -361,7 +361,7 @@ class ExitFrame: public StackFrame {
static void FillState(Address fp, Address sp, State* state);
protected:
inline explicit ExitFrame(StackFrameIterator* iterator);
explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
virtual Address GetCallerStackPointer() const;
@ -394,7 +394,8 @@ class StandardFrame: public StackFrame {
}
protected:
inline explicit StandardFrame(StackFrameIterator* iterator);
explicit StandardFrame(StackFrameIterator* iterator)
: StackFrame(iterator) { }
virtual void ComputeCallerState(State* state) const;
@ -513,7 +514,8 @@ class JavaScriptFrame: public StandardFrame {
}
protected:
inline explicit JavaScriptFrame(StackFrameIterator* iterator);
explicit JavaScriptFrame(StackFrameIterator* iterator)
: StandardFrame(iterator) { }
virtual Address GetCallerStackPointer() const;
@ -550,7 +552,8 @@ class OptimizedFrame : public JavaScriptFrame {
DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
protected:
inline explicit OptimizedFrame(StackFrameIterator* iterator);
explicit OptimizedFrame(StackFrameIterator* iterator)
: JavaScriptFrame(iterator) { }
private:
friend class StackFrameIterator;
@ -578,9 +581,12 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
int index) const;
protected:
inline explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator);
explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
: JavaScriptFrame(iterator) { }
virtual int GetNumberOfIncomingArguments() const;
virtual int GetNumberOfIncomingArguments() const {
return Smi::cast(GetExpression(0))->value();
}
virtual Address GetCallerStackPointer() const;
@ -605,7 +611,8 @@ class InternalFrame: public StandardFrame {
}
protected:
inline explicit InternalFrame(StackFrameIterator* iterator);
explicit InternalFrame(StackFrameIterator* iterator)
: StandardFrame(iterator) { }
virtual Address GetCallerStackPointer() const;
@ -626,7 +633,8 @@ class ConstructFrame: public InternalFrame {
}
protected:
inline explicit ConstructFrame(StackFrameIterator* iterator);
explicit ConstructFrame(StackFrameIterator* iterator)
: InternalFrame(iterator) { }
private:
friend class StackFrameIterator;
@ -702,26 +710,20 @@ class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate);
inline JavaScriptFrameIteratorTemp(Isolate* isolate, ThreadLocalTop* top);
// Skip frames until the frame with the given id is reached.
explicit JavaScriptFrameIteratorTemp(StackFrame::Id id) { AdvanceToId(id); }
inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
JavaScriptFrameIteratorTemp(Address fp,
Address sp,
Address low_bound,
Address high_bound) :
JavaScriptFrameIteratorTemp(Address fp, Address sp,
Address low_bound, Address high_bound) :
iterator_(fp, sp, low_bound, high_bound) {
if (!done()) Advance();
}
JavaScriptFrameIteratorTemp(Isolate* isolate,
Address fp,
Address sp,
Address low_bound,
Address high_bound) :
Address fp, Address sp,
Address low_bound, Address high_bound) :
iterator_(isolate, fp, sp, low_bound, high_bound) {
if (!done()) Advance();
}

41  deps/v8/src/full-codegen.cc

@ -244,6 +244,11 @@ void BreakableStatementChecker::VisitBinaryOperation(BinaryOperation* expr) {
}
void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) {
Visit(expr->expression());
}
void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
Visit(expr->left());
Visit(expr->right());
@ -286,10 +291,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_optimizable(info->IsOptimizable());
cgen.PopulateDeoptimizationData(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
#ifdef ENABLE_DEBUGGER_SUPPORT
code->set_has_debug_break_slots(
info->isolate()->debugger()->IsDebuggerActive());
#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0);
code->set_stack_check_table_offset(table_offset);
CodeGenerator::PrintCode(code, info);
@ -520,7 +523,7 @@ void FullCodeGenerator::VisitDeclarations(
if (var->IsUnallocated()) {
array->set(j++, *(var->name()));
if (decl->fun() == NULL) {
if (var->mode() == CONST) {
if (var->mode() == Variable::CONST) {
// In case this is a const property, use the hole.
array->set_the_hole(j++);
} else {
@ -820,19 +823,9 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
if (stmt->block_scope() != NULL) {
{ Comment cmnt(masm_, "[ Extend block context");
scope_ = stmt->block_scope();
Handle<SerializedScopeInfo> scope_info = scope_->GetSerializedScopeInfo();
int heap_slots =
scope_info->NumberOfContextSlots() - Context::MIN_CONTEXT_SLOTS;
__ Push(scope_info);
__ Push(scope_->GetSerializedScopeInfo());
PushFunctionArgumentForContextAllocation();
if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
FastNewBlockContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kPushBlockContext, 2);
}
// Replace the context stored in the frame.
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
@ -1328,21 +1321,19 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryCatch::Exit(
}
bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
Expression *sub_expr;
bool FullCodeGenerator::TryLiteralCompare(CompareOperation* compare,
Label* if_true,
Label* if_false,
Label* fall_through) {
Expression *expr;
Handle<String> check;
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
EmitLiteralCompareTypeof(sub_expr, check);
return true;
}
if (expr->IsLiteralCompareUndefined(&sub_expr)) {
EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
if (compare->IsLiteralCompareTypeof(&expr, &check)) {
EmitLiteralCompareTypeof(expr, check, if_true, if_false, fall_through);
return true;
}
if (expr->IsLiteralCompareNull(&sub_expr)) {
EmitLiteralCompareNil(expr, sub_expr, kNullValue);
if (compare->IsLiteralCompareUndefined(&expr)) {
EmitLiteralCompareUndefined(expr, if_true, if_false, fall_through);
return true;
}
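
As restored here, TryLiteralCompare fast-paths only two comparison shapes: "typeof <expr>" against a string literal, and a strict comparison against undefined. A standalone sketch of that pattern match over a made-up mini-AST that accepts the literal on either side; the Expr type and its Kind values are assumptions, not V8's AST classes:

#include <cassert>
#include <string>

// Minimal stand-ins for the AST nodes involved; not V8's Expression classes.
struct Expr {
  enum Kind { kTypeofOp, kUndefinedLiteral, kStringLiteral, kOther } kind;
  const Expr* operand = nullptr;   // operand of a typeof expression
  std::string string_value;        // value of a string literal
};

// Matches typeof <expr> == "<check>" with the literal on either side and
// returns the sub-expression whose type is being asked for.
static const Expr* MatchTypeofCompare(const Expr* left, const Expr* right,
                                      std::string* check) {
  auto one_way = [check](const Expr* a, const Expr* b) -> const Expr* {
    if (a->kind == Expr::kTypeofOp && b->kind == Expr::kStringLiteral) {
      *check = b->string_value;
      return a->operand;
    }
    return nullptr;
  };
  if (const Expr* e = one_way(left, right)) return e;
  return one_way(right, left);
}

// Matches <expr> === undefined in either operand order.
static const Expr* MatchUndefinedCompare(const Expr* left, const Expr* right) {
  if (right->kind == Expr::kUndefinedLiteral) return left;
  if (left->kind == Expr::kUndefinedLiteral) return right;
  return nullptr;
}

int main() {
  Expr x{Expr::kOther};
  Expr typeof_x{Expr::kTypeofOp, &x};
  Expr literal{Expr::kStringLiteral, nullptr, "number"};
  std::string check;
  assert(MatchTypeofCompare(&typeof_x, &literal, &check) == &x);
  assert(check == "number");
  Expr undef{Expr::kUndefinedLiteral};
  assert(MatchUndefinedCompare(&x, &undef) == &x);
}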

23
deps/v8/src/full-codegen.h

@ -391,16 +391,25 @@ class FullCodeGenerator: public AstVisitor {
// Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operation
// has been matched and all code generated; false otherwise.
bool TryLiteralCompare(CompareOperation* compare);
bool TryLiteralCompare(CompareOperation* compare,
Label* if_true,
Label* if_false,
Label* fall_through);
// Platform-specific code for comparing the type of a value with
// a given literal string.
void EmitLiteralCompareTypeof(Expression* expr, Handle<String> check);
void EmitLiteralCompareTypeof(Expression* expr,
Handle<String> check,
Label* if_true,
Label* if_false,
Label* fall_through);
// Platform-specific code for equality comparison with a nil-like value.
void EmitLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil);
// Platform-specific code for strict equality comparison with
// the undefined value.
void EmitLiteralCompareUndefined(Expression* expr,
Label* if_true,
Label* if_false,
Label* fall_through);
// Bailout support.
void PrepareForBailout(Expression* node, State state);
@ -423,7 +432,7 @@ class FullCodeGenerator: public AstVisitor {
// Platform-specific code for a variable, constant, or function
// declaration. Functions have an initial value.
void EmitDeclaration(VariableProxy* proxy,
VariableMode mode,
Variable::Mode mode,
FunctionLiteral* function,
int* global_count);

4
deps/v8/src/globals.h

@ -255,10 +255,6 @@ const int kBinary32MinExponent = 0x01;
const int kBinary32MantissaBits = 23;
const int kBinary32ExponentShift = 23;
// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
// other bits set.
const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
// ASCII/UC16 constants
// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
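
The removed kQuietNaNMask encodes the layout described in the comment above: bits 51 through 62 of a quiet NaN are always set. A minimal standalone check of that claim (illustrative only, not part of the diff; assumes the usual IEEE-754 double layout):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

int main() {
  // Same constant as the removed declaration: twelve set bits at 51..62.
  const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
  const double nan = std::numeric_limits<double>::quiet_NaN();
  uint64_t bits = 0;
  std::memcpy(&bits, &nan, sizeof bits);
  // The exponent (bits 52..62) is all ones for any NaN and bit 51 is the
  // "quiet" bit on common platforms, so the whole mask is covered.
  assert((bits & kQuietNaNMask) == kQuietNaNMask);
  // The sign bit (63) and the remaining mantissa bits may or may not be set.
}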

17
deps/v8/src/handles.cc

@ -190,11 +190,7 @@ static int ExpectedNofPropertiesFromEstimate(int estimate) {
// Inobject slack tracking will reclaim redundant inobject space later,
// so we can afford to adjust the estimate generously.
if (FLAG_clever_optimizations) {
return estimate + 8;
} else {
return estimate + 3;
}
}
@ -425,18 +421,17 @@ Handle<Object> PreventExtensions(Handle<JSObject> object) {
}
Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
Handle<String> key,
Handle<Object> value) {
Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
JSObject::HiddenPropertiesFlag flag) {
CALL_HEAP_FUNCTION(obj->GetIsolate(),
obj->SetHiddenProperty(*key, *value),
obj->GetHiddenProperties(flag),
Object);
}
int GetIdentityHash(Handle<JSReceiver> obj) {
int GetIdentityHash(Handle<JSObject> obj) {
CALL_AND_RETRY(obj->GetIsolate(),
obj->GetIdentityHash(ALLOW_CREATION),
obj->GetIdentityHash(JSObject::ALLOW_CREATION),
return Smi::cast(__object__)->value(),
return 0);
}
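
GetIdentityHash above goes through CALL_AND_RETRY, and the handle wrappers before it go through CALL_HEAP_FUNCTION; both wrap an allocating call so that a retry-after-GC failure triggers a collection and a second attempt. The macro bodies are not part of this hunk, so the following is only a sketch of that general retry pattern, with made-up AllocResult/CallAndRetry names:

#include <functional>
#include <optional>

struct AllocResult {
  bool retry_after_gc = false;  // allocation failed; a GC might free space
  void* object = nullptr;
};

// Runs allocate; on a retry-after-GC failure, collects garbage and tries
// again, giving up after a bounded number of attempts.
static std::optional<void*> CallAndRetry(
    const std::function<AllocResult()>& allocate,
    const std::function<void()>& collect_garbage,
    int max_attempts = 3) {
  for (int attempt = 0; attempt < max_attempts; ++attempt) {
    AllocResult result = allocate();
    if (!result.retry_after_gc) return result.object;
    collect_garbage();
  }
  return std::nullopt;  // still failing: the caller reports out of memory
}

int main() {
  int failures_left = 2;
  auto allocate = [&]() -> AllocResult {
    if (failures_left > 0) { --failures_left; return {true, nullptr}; }
    static int object;
    return {false, &object};
  };
  auto collect_garbage = [] { /* pretend to free memory */ };
  return CallAndRetry(allocate, collect_garbage).has_value() ? 0 : 1;
}
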
@ -891,7 +886,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
Handle<JSReceiver> key,
Handle<JSObject> key,
Handle<Object> value) {
CALL_HEAP_FUNCTION(table->GetIsolate(),
table->Put(*key, *value),

15
deps/v8/src/handles.h

@ -263,13 +263,14 @@ Handle<Object> GetPrototype(Handle<Object> obj);
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
// Sets a hidden property on an object. Returns obj on success, undefined
// if trying to set the property on a detached proxy.
Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
Handle<String> key,
Handle<Object> value);
// Return the object's hidden properties object. If the object has no hidden
// properties and HiddenPropertiesFlag::ALLOW_CREATION is passed, then a new
// hidden property object will be allocated. Otherwise Heap::undefined_value
// is returned.
Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
JSObject::HiddenPropertiesFlag flag);
int GetIdentityHash(Handle<JSReceiver> obj);
int GetIdentityHash(Handle<JSObject> obj);
Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
@ -347,7 +348,7 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> PreventExtensions(Handle<JSObject> object);
Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
Handle<JSReceiver> key,
Handle<JSObject> key,
Handle<Object> value);
// Does lazy compilation of the given function. Returns true on success and

104
deps/v8/src/heap-inl.h

@ -33,26 +33,15 @@
#include "list-inl.h"
#include "objects.h"
#include "v8-counters.h"
#include "store-buffer.h"
#include "store-buffer-inl.h"
namespace v8 {
namespace internal {
void PromotionQueue::insert(HeapObject* target, int size) {
if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
NewSpacePage* rear_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
ASSERT(!rear_page->prev_page()->is_anchor());
rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit());
}
*(--rear_) = reinterpret_cast<intptr_t>(target);
*(--rear_) = size;
// Assert no overflow into live objects.
#ifdef DEBUG
SemiSpace::AssertValidRange(HEAP->new_space()->top(),
reinterpret_cast<Address>(rear_));
#endif
ASSERT(reinterpret_cast<Address>(rear_) >= HEAP->new_space()->top());
}
@ -95,7 +84,7 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
? lo_space_->AllocateRaw(size)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@ -128,7 +117,7 @@ MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
? lo_space_->AllocateRaw(size)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@ -192,7 +181,7 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
} else if (CODE_SPACE == space) {
result = code_space_->AllocateRaw(size_in_bytes);
} else if (LO_SPACE == space) {
result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
result = lo_space_->AllocateRaw(size_in_bytes);
} else if (CELL_SPACE == space) {
result = cell_space_->AllocateRaw(size_in_bytes);
} else {
@ -276,11 +265,6 @@ bool Heap::InNewSpace(Object* object) {
}
bool Heap::InNewSpace(Address addr) {
return new_space_.Contains(addr);
}
bool Heap::InFromSpace(Object* object) {
return new_space_.FromSpaceContains(object);
}
@ -291,36 +275,29 @@ bool Heap::InToSpace(Object* object) {
}
bool Heap::OldGenerationAllocationLimitReached() {
if (!incremental_marking()->IsStopped()) return false;
return OldGenerationSpaceAvailable() < 0;
}
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
// An object should be promoted if:
// - the object has survived a scavenge operation or
// - to space is already 25% full.
NewSpacePage* page = NewSpacePage::FromAddress(old_address);
Address age_mark = new_space_.age_mark();
bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
(!page->ContainsLimit(age_mark) || old_address < age_mark);
return below_mark || (new_space_.Size() + object_size) >=
(new_space_.EffectiveCapacity() >> 2);
return old_address < new_space_.age_mark()
|| (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
}
void Heap::RecordWrite(Address address, int offset) {
if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
SLOW_ASSERT(Contains(address + offset));
Page::FromAddress(address)->MarkRegionDirty(address + offset);
}
void Heap::RecordWrites(Address address, int start, int len) {
if (!InNewSpace(address)) {
for (int i = 0; i < len; i++) {
store_buffer_.Mark(address + start + i * kPointerSize);
}
}
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
Page* page = Page::FromAddress(address);
page->SetRegionMarks(page->GetRegionMarks() |
page->GetRegionMaskForSpan(address + start, len * kPointerSize));
}
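
The restored ShouldBePromoted above reduces to two conditions: the object lies below the new-space age mark (so it already survived the previous scavenge), or new space is at least a quarter full. A standalone restatement of that rule; the NewSpaceState type is a stand-in assumption, not V8 code:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Stand-in for the new-space state the predicate consults.
struct NewSpaceState {
  uintptr_t age_mark;  // addresses below this survived the previous scavenge
  size_t size;         // bytes currently in use
  size_t capacity;     // total new-space capacity
};

static bool ShouldBePromoted(const NewSpaceState& s,
                             uintptr_t old_address,
                             size_t object_size) {
  // Promote scavenge survivors, or anything once to-space is 25% full.
  return old_address < s.age_mark ||
         (s.size + object_size) >= (s.capacity >> 2);
}

int main() {
  NewSpaceState s{0x1000, 64, 1024};
  assert(ShouldBePromoted(s, 0x0800, 16));   // below the age mark
  assert(!ShouldBePromoted(s, 0x2000, 16));  // young, and space mostly empty
  assert(ShouldBePromoted(s, 0x2000, 200));  // would push past 25% occupancy
}
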
@ -366,6 +343,31 @@ void Heap::CopyBlock(Address dst, Address src, int byte_size) {
}
void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
Page* page = Page::FromAddress(dst);
uint32_t marks = page->GetRegionMarks();
for (int remaining = byte_size / kPointerSize;
remaining > 0;
remaining--) {
Memory::Object_at(dst) = Memory::Object_at(src);
if (InNewSpace(Memory::Object_at(dst))) {
marks |= page->GetRegionMaskForAddress(dst);
}
dst += kPointerSize;
src += kPointerSize;
}
page->SetRegionMarks(marks);
}
void Heap::MoveBlock(Address dst, Address src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
@ -385,6 +387,16 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
}
void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
ASSERT((dst < src) || (dst >= (src + byte_size)));
CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
}
void Heap::ScavengePointer(HeapObject** p) {
ScavengeObject(p, *p);
}
@ -402,9 +414,7 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
// If the first word is a forwarding address, the object has already been
// copied.
if (first_word.IsForwardingAddress()) {
HeapObject* dest = first_word.ToForwardingAddress();
ASSERT(HEAP->InFromSpace(*p));
*p = dest;
*p = first_word.ToForwardingAddress();
return;
}
@ -449,7 +459,7 @@ int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
amount_of_external_allocated_memory_ -
amount_of_external_allocated_memory_at_last_global_gc_;
if (amount_since_last_global_gc > external_allocation_limit_) {
CollectAllGarbage(kNoGCFlags);
CollectAllGarbage(false);
}
} else {
// Avoid underflow.
@ -466,7 +476,6 @@ void Heap::SetLastScriptId(Object* last_script_id) {
roots_[kLastScriptIdRootIndex] = last_script_id;
}
Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
@ -679,6 +688,15 @@ Heap* _inline_get_heap_() {
}
void MarkCompactCollector::SetMark(HeapObject* obj) {
tracer_->increment_marked_count();
#ifdef DEBUG
UpdateLiveObjectCount(obj);
#endif
obj->SetMark();
}
} } // namespace v8::internal
#endif // V8_HEAP_INL_H_
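
Heap::isolate() above recovers the enclosing Isolate from an embedded Heap by subtracting the member's offset, and it computes that offset with the fake address-4 cast instead of offsetof. A standalone sketch of the same trick with a made-up Container type standing in for Isolate; like the original, it relies on the compiler forming the member address without dereferencing anything:

#include <cassert>
#include <cstdint>

struct Heap {
  int placeholder = 0;
  inline struct Container* container();
};

struct Container {  // stands in for Isolate
  long other_state = 0;
  Heap heap_;
  Heap* heap() { return &heap_; }
};

inline Container* Heap::container() {
  // Pretend a Container lives at address 4, ask where its heap_ member would
  // be, and subtract that offset from this. Only addresses are computed.
  return reinterpret_cast<Container*>(
      reinterpret_cast<intptr_t>(this) -
      reinterpret_cast<intptr_t>(reinterpret_cast<Container*>(4)->heap()) + 4);
}

int main() {
  Container c;
  assert(c.heap_.container() == &c);
}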

1
deps/v8/src/heap-profiler.cc

@ -114,6 +114,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
HEAP->CollectAllGarbage(true);
HeapSnapshotGenerator generator(result, control);
generation_completed = generator.GenerateSnapshot();
break;

1471
deps/v8/src/heap.cc

File diff suppressed because it is too large

461
deps/v8/src/heap.h

@ -32,15 +32,11 @@
#include "allocation.h"
#include "globals.h"
#include "incremental-marking.h"
#include "list.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "spaces.h"
#include "splay-tree-inl.h"
#include "store-buffer.h"
#include "v8-counters.h"
#include "v8globals.h"
namespace v8 {
namespace internal {
@ -53,19 +49,19 @@ inline Heap* _inline_get_heap_();
// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V) \
/* Put the byte array map early. We need it to be in place by the time */ \
/* the deserializer hits the next page, since it wants to put a byte */ \
/* array in the unused space at the end of the page. */ \
V(Map, byte_array_map, ByteArrayMap) \
V(Map, free_space_map, FreeSpaceMap) \
V(Map, one_pointer_filler_map, OnePointerFillerMap) \
V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
/* Cluster the most popular ones in a few cache lines here at the top. */ \
V(Smi, store_buffer_top, StoreBufferTop) \
V(Oddball, undefined_value, UndefinedValue) \
V(Oddball, the_hole_value, TheHoleValue) \
V(Oddball, null_value, NullValue) \
V(Oddball, true_value, TrueValue) \
V(Oddball, false_value, FalseValue) \
V(Oddball, arguments_marker, ArgumentsMarker) \
V(Oddball, frame_alignment_marker, FrameAlignmentMarker) \
V(Object, undefined_value, UndefinedValue) \
V(Object, the_hole_value, TheHoleValue) \
V(Object, null_value, NullValue) \
V(Object, true_value, TrueValue) \
V(Object, false_value, FalseValue) \
V(Object, arguments_marker, ArgumentsMarker) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, global_context_map, GlobalContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
@ -126,9 +122,8 @@ inline Heap* _inline_get_heap_();
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, message_object_map, JSMessageObjectMap) \
V(Map, foreign_map, ForeignMap) \
V(HeapNumber, nan_value, NanValue) \
V(HeapNumber, infinity_value, InfinityValue) \
V(HeapNumber, minus_zero_value, MinusZeroValue) \
V(Object, nan_value, NanValue) \
V(Object, minus_zero_value, MinusZeroValue) \
V(Map, neander_map, NeanderMap) \
V(JSObject, message_listeners, MessageListeners) \
V(Foreign, prototype_accessors, PrototypeAccessors) \
@ -231,9 +226,7 @@ inline Heap* _inline_get_heap_();
V(closure_symbol, "(closure)") \
V(use_strict, "use strict") \
V(dot_symbol, ".") \
V(anonymous_function_symbol, "(anonymous function)") \
V(infinity_symbol, "Infinity") \
V(minus_infinity_symbol, "-Infinity")
V(anonymous_function_symbol, "(anonymous function)")
// Forward declarations.
class GCTracer;
@ -245,26 +238,10 @@ class WeakObjectRetainer;
typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
Object** pointer);
class StoreBufferRebuilder {
public:
explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
: store_buffer_(store_buffer) {
}
void Callback(MemoryChunk* page, StoreBufferEvent event);
private:
StoreBuffer* store_buffer_;
// We record in this variable how full the store buffer was when we started
// iterating over the current page, finding pointers to new space. If the
// store buffer overflows again we can exempt the page from the store buffer
// by rewinding to this point instead of having to search the store buffer.
Object*** start_of_current_page_;
// The current page we are scanning in the store buffer iterator.
MemoryChunk* current_page_;
};
typedef bool (*DirtyRegionCallback)(Heap* heap,
Address start,
Address end,
ObjectSlotCallback copy_object_func);
// The all static Heap captures the interface to the global object heap.
@ -282,37 +259,22 @@ class PromotionQueue {
PromotionQueue() : front_(NULL), rear_(NULL) { }
void Initialize(Address start_address) {
// Assumes that a NewSpacePage exactly fits a number of promotion queue
// entries (where each is a pair of intptr_t). This allows us to simplify
// the test for when to switch pages.
ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
== 0);
ASSERT(NewSpacePage::IsAtEnd(start_address));
front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
}
bool is_empty() { return front_ == rear_; }
bool is_empty() { return front_ <= rear_; }
inline void insert(HeapObject* target, int size);
void remove(HeapObject** target, int* size) {
ASSERT(!is_empty());
if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
NewSpacePage* front_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
ASSERT(!front_page->prev_page()->is_anchor());
front_ =
reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit());
}
*target = reinterpret_cast<HeapObject*>(*(--front_));
*size = static_cast<int>(*(--front_));
// Assert no underflow.
SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
reinterpret_cast<Address>(front_));
ASSERT(front_ >= rear_);
}
private:
// The front of the queue is higher in the memory page chain than the rear.
// The front of the queue is higher in memory than the rear.
intptr_t* front_;
intptr_t* rear_;
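
The promotion queue stores (object, size) pairs that grow downwards in memory: insert pushes at rear_, remove consumes at front_, and the queue is empty once the two meet, which is why front_ is always the higher of the two pointers. A standalone restatement of that protocol using a plain array as backing store (an assumption; the real queue lives at the end of a new-space page):

#include <cassert>
#include <cstdint>

class PromotionQueue {
 public:
  // end points one past the highest slot of the backing store.
  void Initialize(intptr_t* end) { front_ = rear_ = end; }
  bool is_empty() const { return front_ == rear_; }

  void insert(void* target, int size) {
    *(--rear_) = reinterpret_cast<intptr_t>(target);
    *(--rear_) = size;
  }

  void remove(void** target, int* size) {
    assert(!is_empty());
    *target = reinterpret_cast<void*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    assert(front_ >= rear_);  // the front never overtakes the rear
  }

 private:
  // The front of the queue is higher in memory than the rear.
  intptr_t* front_ = nullptr;
  intptr_t* rear_ = nullptr;
};

int main() {
  intptr_t backing[8];
  PromotionQueue queue;
  queue.Initialize(backing + 8);
  int promoted_object = 0;
  queue.insert(&promoted_object, static_cast<int>(sizeof(int)));
  void* target = nullptr;
  int size = 0;
  queue.remove(&target, &size);
  assert(target == &promoted_object && queue.is_empty());
}
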
@ -320,11 +282,6 @@ class PromotionQueue {
};
typedef void (*ScavengingCallback)(Map* map,
HeapObject** slot,
HeapObject* object);
// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
// finalize them.
@ -370,8 +327,8 @@ class Heap {
// Configure heap size before setup. Return false if the heap has been
// setup already.
bool ConfigureHeap(int max_semispace_size,
intptr_t max_old_gen_size,
intptr_t max_executable_size);
int max_old_gen_size,
int max_executable_size);
bool ConfigureHeapDefault();
// Initializes the global object heap. If create_heap_objects is true,
@ -499,7 +456,6 @@ class Heap {
// size, but keeping the original prototype. The receiver must have at least
// the size of the new object. The object is reinitialized and behaves as an
// object that has been freshly allocated.
// Returns failure if an error occurred, otherwise object.
MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object,
InstanceType type,
int size);
@ -528,10 +484,8 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateMap(
InstanceType instance_type,
int instance_size,
ElementsKind elements_kind = FAST_ELEMENTS);
MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type,
int instance_size);
// Allocates a partial map for bootstrapping.
MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@ -842,9 +796,9 @@ class Heap {
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
const ExternalAsciiString::Resource* resource);
ExternalAsciiString::Resource* resource);
MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource);
ExternalTwoByteString::Resource* resource);
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
@ -931,24 +885,13 @@ class Heap {
// collect more garbage.
inline bool CollectGarbage(AllocationSpace space);
static const int kNoGCFlags = 0;
static const int kMakeHeapIterableMask = 1;
// Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
// non-zero, then the slower precise sweeper is used, which leaves the heap
// in a state where we can iterate over the heap visiting all objects.
void CollectAllGarbage(int flags);
// Performs a full garbage collection. Force compaction if the
// parameter is true.
void CollectAllGarbage(bool force_compaction);
// Last hope GC, should try to squeeze as much as possible.
void CollectAllAvailableGarbage();
// Check whether the heap is currently iterable.
bool IsHeapIterable();
// Ensure that we have swept all spaces in such a way that we can iterate
// over all objects. May cause a GC.
void EnsureHeapIsIterable();
// Notify the heap that a context has been disposed.
int NotifyContextDisposed() { return ++contexts_disposed_; }
@ -956,20 +899,6 @@ class Heap {
// ensure correct callback for weak global handles.
void PerformScavenge();
inline void increment_scan_on_scavenge_pages() {
scan_on_scavenge_pages_++;
if (FLAG_gc_verbose) {
PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
}
}
inline void decrement_scan_on_scavenge_pages() {
scan_on_scavenge_pages_--;
if (FLAG_gc_verbose) {
PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
}
}
PromotionQueue* promotion_queue() { return &promotion_queue_; }
#ifdef DEBUG
@ -996,8 +925,6 @@ class Heap {
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
// TODO(1490): Try removing the unchecked accessors, now that GC marking does
// not corrupt the stack.
#define ROOT_ACCESSOR(type, name, camel_name) \
type* name() { \
return type::cast(roots_[k##camel_name##RootIndex]); \
@ -1031,9 +958,6 @@ class Heap {
}
Object* global_contexts_list() { return global_contexts_list_; }
// Number of mark-sweeps.
int ms_count() { return ms_count_; }
// Iterates over all roots in the heap.
void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
@ -1041,16 +965,60 @@ class Heap {
// Iterates over all the other roots in the heap.
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
enum ExpectedPageWatermarkState {
WATERMARK_SHOULD_BE_VALID,
WATERMARK_CAN_BE_INVALID
};
// For each dirty region on a page in use from an old space, call the
// visit_dirty_region callback.
// If either visit_dirty_region or callback can cause an allocation
// in old space and change the allocation watermark, then
// can_preallocate_during_iteration should be set to true.
// All pages will be marked as having an invalid watermark upon
// iteration completion.
void IterateDirtyRegions(
PagedSpace* space,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback,
ExpectedPageWatermarkState expected_page_watermark_state);
// Interpret marks as a bitvector of dirty marks for regions of size
// Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
// memory interval from start to top. For each dirty region call a
// visit_dirty_region callback. Return updated bitvector of dirty marks.
uint32_t IterateDirtyRegions(uint32_t marks,
Address start,
Address end,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback);
// Iterate pointers to the from-semispace of new space found in the memory interval
// from start to end.
// Update dirty marks for page containing start address.
void IterateAndMarkPointersToFromSpace(Address start,
Address end,
ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// Return true if pointers to new space were found.
static bool IteratePointersInDirtyRegion(Heap* heap,
Address start,
Address end,
ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// This interval is considered to belong to the map space.
// Return true if pointers to new space were found.
static bool IteratePointersInDirtyMapsRegion(Heap* heap,
Address start,
Address end,
ObjectSlotCallback callback);
// Returns whether the object resides in new space.
inline bool InNewSpace(Object* object);
inline bool InNewSpace(Address addr);
inline bool InNewSpacePage(Address addr);
inline bool InFromSpace(Object* object);
inline bool InToSpace(Object* object);
@ -1089,20 +1057,12 @@ class Heap {
roots_[kEmptyScriptRootIndex] = script;
}
void public_set_store_buffer_top(Address* top) {
roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
}
// Update the next script id.
inline void SetLastScriptId(Object* last_script_id);
// Generated code can embed this address to get access to the roots.
Object** roots_address() { return roots_; }
Address* store_buffer_top_address() {
return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
}
// Get address of global contexts list for serialization support.
Object** global_contexts_list_address() {
return &global_contexts_list_;
@ -1115,10 +1075,6 @@ class Heap {
// Verify the heap is in its normal state before or after a GC.
void Verify();
void OldPointerSpaceCheckStoreBuffer();
void MapSpaceCheckStoreBuffer();
void LargeObjectSpaceCheckStoreBuffer();
// Report heap statistics.
void ReportHeapStatistics(const char* title);
void ReportCodeStatistics(const char* title);
@ -1214,53 +1170,22 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
PretenureFlag pretenure);
inline intptr_t PromotedTotalSize() {
return PromotedSpaceSize() + PromotedExternalMemorySize();
}
// True if we have reached the allocation limit in the old generation that
// should force the next GC (caused normally) to be a full one.
inline bool OldGenerationPromotionLimitReached() {
return PromotedTotalSize() > old_gen_promotion_limit_;
bool OldGenerationPromotionLimitReached() {
return (PromotedSpaceSize() + PromotedExternalMemorySize())
> old_gen_promotion_limit_;
}
inline intptr_t OldGenerationSpaceAvailable() {
return old_gen_allocation_limit_ - PromotedTotalSize();
intptr_t OldGenerationSpaceAvailable() {
return old_gen_allocation_limit_ -
(PromotedSpaceSize() + PromotedExternalMemorySize());
}
static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
static const intptr_t kMinimumAllocationLimit =
8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
// When we sweep lazily we initially guess that there is no garbage on the
// heap and set the limits for the next GC accordingly. As we sweep we find
// out that some of the pages contained garbage and we have to adjust
// downwards the size of the heap. This means the limits that control the
// timing of the next GC also need to be adjusted downwards.
void LowerOldGenLimits(intptr_t adjustment) {
size_of_old_gen_at_last_old_space_gc_ -= adjustment;
old_gen_promotion_limit_ =
OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
old_gen_allocation_limit_ =
OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
}
intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
const int divisor = FLAG_stress_compaction ? 10 : 3;
intptr_t limit =
Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
limit += new_space_.Capacity();
limit *= old_gen_limit_factor_;
return limit;
}
intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
const int divisor = FLAG_stress_compaction ? 8 : 2;
intptr_t limit =
Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
limit += new_space_.Capacity();
limit *= old_gen_limit_factor_;
return limit;
// True if we have reached the allocation limit in the old generation that
// should artificially cause a GC right now.
bool OldGenerationAllocationLimitReached() {
return OldGenerationSpaceAvailable() < 0;
}
// Can be called when the embedding application is idle.
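
The removed OldGenPromotionLimit/OldGenAllocationLimit heuristics above compute the next-GC threshold as max(old_gen_size + old_gen_size / divisor, minimum) plus the new-space capacity, then scale it by old_gen_limit_factor_. A worked example with made-up sizes (illustrative only, not part of the diff):

#include <algorithm>
#include <cstdint>
#include <cstdio>

static int64_t OldGenLimit(int64_t old_gen_size, int divisor, int64_t minimum,
                           int64_t new_space_capacity, int factor) {
  int64_t limit = std::max(old_gen_size + old_gen_size / divisor, minimum);
  limit += new_space_capacity;
  return limit * factor;
}

int main() {
  const int64_t MB = 1 << 20;
  // Example values only: 30 MB of old-generation data, promotion divisor 3,
  // a 5 MB minimum, 2 MB of new space, factor 1 -> max(30 + 10, 5) + 2 = 42.
  std::printf("next promotion limit: %lld MB\n",
              static_cast<long long>(
                  OldGenLimit(30 * MB, 3, 5 * MB, 2 * MB, 1) / MB));
}
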
@ -1288,8 +1213,6 @@ class Heap {
MUST_USE_RESULT MaybeObject* NumberToString(
Object* number, bool check_number_string_cache = true);
MUST_USE_RESULT MaybeObject* Uint32ToString(
uint32_t value, bool check_number_string_cache = true);
Map* MapForExternalArrayType(ExternalArrayType array_type);
RootListIndex RootIndexForExternalArrayType(
@ -1301,10 +1224,18 @@ class Heap {
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size);
// Optimized version of memmove for blocks with pointer size aligned sizes and
// pointer size aligned addresses.
static inline void MoveBlock(Address dst, Address src, int byte_size);
inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size);
// Check new space expansion criteria and expand semispaces if it was hit.
void CheckNewSpaceExpansionCriteria();
@ -1313,31 +1244,9 @@ class Heap {
survived_since_last_expansion_ += survived;
}
inline bool NextGCIsLikelyToBeFull() {
if (FLAG_gc_global) return true;
intptr_t total_promoted = PromotedTotalSize();
intptr_t adjusted_promotion_limit =
old_gen_promotion_limit_ - new_space_.Capacity();
if (total_promoted >= adjusted_promotion_limit) return true;
intptr_t adjusted_allocation_limit =
old_gen_allocation_limit_ - new_space_.Capacity() / 5;
if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
return false;
}
void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
void UpdateReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
void ProcessWeakReferences(WeakObjectRetainer* retainer);
// Helper function that governs the promotion policy from new space to
@ -1354,9 +1263,6 @@ class Heap {
GCTracer* tracer() { return tracer_; }
// Returns the size of objects residing in non new spaces.
intptr_t PromotedSpaceSize();
double total_regexp_code_generated() { return total_regexp_code_generated_; }
void IncreaseTotalRegexpCodeGenerated(int size) {
total_regexp_code_generated_ += size;
@ -1375,18 +1281,6 @@ class Heap {
return &mark_compact_collector_;
}
StoreBuffer* store_buffer() {
return &store_buffer_;
}
Marking* marking() {
return &marking_;
}
IncrementalMarking* incremental_marking() {
return &incremental_marking_;
}
ExternalStringTable* external_string_table() {
return &external_string_table_;
}
@ -1397,28 +1291,16 @@ class Heap {
}
inline Isolate* isolate();
bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
inline void CallGlobalGCPrologueCallback() {
void CallGlobalGCPrologueCallback() {
if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
}
inline void CallGlobalGCEpilogueCallback() {
void CallGlobalGCEpilogueCallback() {
if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
}
inline bool OldGenerationAllocationLimitReached();
inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
}
void QueueMemoryChunkForFree(MemoryChunk* chunk);
void FreeQueuedChunks();
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
inline void CompletelyClearInstanceofCache();
private:
Heap();
@ -1426,12 +1308,12 @@ class Heap {
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_;
intptr_t code_range_size_;
int reserved_semispace_size_;
int max_semispace_size_;
int initial_semispace_size_;
intptr_t max_old_generation_size_;
intptr_t max_executable_size_;
intptr_t code_range_size_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
@ -1446,8 +1328,6 @@ class Heap {
// For keeping track of context disposals.
int contexts_disposed_;
int scan_on_scavenge_pages_;
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 1024*KB;
#else
@ -1464,9 +1344,13 @@ class Heap {
HeapState gc_state_;
int gc_post_processing_depth_;
// Returns the size of object residing in non new spaces.
intptr_t PromotedSpaceSize();
// Returns the amount of external memory registered since last global gc.
int PromotedExternalMemorySize();
int mc_count_; // how many mark-compact collections happened
int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened
@ -1505,13 +1389,6 @@ class Heap {
// every allocation in large object space.
intptr_t old_gen_allocation_limit_;
// Sometimes the heuristics dictate that those limits are increased. This
// variable records that fact.
int old_gen_limit_factor_;
// Used to adjust the limits that control the timing of the next GC.
intptr_t size_of_old_gen_at_last_old_space_gc_;
// Limit on the amount of externally allocated memory allowed
// between global GCs. If reached a global GC is forced.
intptr_t external_allocation_limit_;
@ -1531,8 +1408,6 @@ class Heap {
Object* global_contexts_list_;
StoreBufferRebuilder store_buffer_rebuilder_;
struct StringTypeTable {
InstanceType type;
int size;
@ -1590,11 +1465,13 @@ class Heap {
// Support for computing object sizes during GC.
HeapObjectCallback gc_safe_size_of_old_object_;
static int GcSafeSizeOfOldObject(HeapObject* object);
static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
// Update the GC state. Called from the mark-compact collector.
void MarkMapPointersAsEncoded(bool encoded) {
ASSERT(!encoded);
gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
gc_safe_size_of_old_object_ = encoded
? &GcSafeSizeOfOldObjectWithEncodedMap
: &GcSafeSizeOfOldObject;
}
// Checks whether a global GC is necessary
@ -1606,10 +1483,11 @@ class Heap {
bool PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer);
static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;
inline void UpdateOldSpaceLimits();
// Allocate an uninitialized object in map space. The behavior is identical
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
@ -1644,6 +1522,8 @@ class Heap {
// Allocate empty fixed double array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
// Performs a minor collection in new generation.
void Scavenge();
@ -1652,15 +1532,16 @@ class Heap {
Object** pointer);
Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
static void ScavengeStoreBufferCallback(Heap* heap,
MemoryChunk* page,
StoreBufferEvent event);
// Performs a major collection in the whole heap.
void MarkCompact(GCTracer* tracer);
// Code to be run before and after mark-compact.
void MarkCompactPrologue();
void MarkCompactPrologue(bool is_compacting);
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
inline void CompletelyClearInstanceofCache();
// Record statistics before and after garbage collection.
void ReportStatisticsBeforeGC();
@ -1670,11 +1551,12 @@ class Heap {
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
// Initializes a function with a shared part and prototype.
// Returns the function.
// Note: this code was factored out of AllocateFunction such that
// other parts of the VM could use it. Specifically, a function that creates
// instances of type JS_FUNCTION_TYPE benefits from the use of this function.
// Please note this does not perform a garbage collection.
inline void InitializeFunction(
MUST_USE_RESULT inline MaybeObject* InitializeFunction(
JSFunction* function,
SharedFunctionInfo* shared,
Object* prototype);
@ -1739,8 +1621,6 @@ class Heap {
return high_survival_rate_period_length_ > 0;
}
void SelectScavengingVisitorsTable();
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
@ -1760,11 +1640,10 @@ class Heap {
MarkCompactCollector mark_compact_collector_;
StoreBuffer store_buffer_;
Marking marking_;
IncrementalMarking incremental_marking_;
// This field contains the meaning of the WATERMARK_INVALIDATED flag.
// Instead of clearing this flag from all pages we just flip
// its meaning at the beginning of a scavenge.
intptr_t page_watermark_invalidated_mark_;
int number_idle_notifications_;
unsigned int last_idle_notification_gc_count_;
@ -1779,9 +1658,7 @@ class Heap {
ExternalStringTable external_string_table_;
VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
MemoryChunk* chunks_queued_for_free_;
bool is_safe_to_read_maps_;
friend class Factory;
friend class GCTracer;
@ -1880,6 +1757,29 @@ class VerifyPointersVisitor: public ObjectVisitor {
}
}
};
// Visitor class to verify interior pointers in spaces that use region marks
// to keep track of intergenerational references.
// As VerifyPointersVisitor but also checks that dirty marks are set
// for regions covering intergenerational references.
class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
ASSERT(HEAP->Contains(object));
ASSERT(object->map()->IsMap());
if (HEAP->InNewSpace(object)) {
ASSERT(HEAP->InToSpace(object));
Address addr = reinterpret_cast<Address>(current);
ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
}
}
}
}
};
#endif
@ -2212,6 +2112,16 @@ class GCTracer BASE_EMBEDDED {
// Sets the full GC count.
void set_full_gc_count(int count) { full_gc_count_ = count; }
// Sets the flag that this is a compacting full GC.
void set_is_compacting() { is_compacting_ = true; }
bool is_compacting() const { return is_compacting_; }
// Increment and decrement the count of marked objects.
void increment_marked_count() { ++marked_count_; }
void decrement_marked_count() { --marked_count_; }
int marked_count() { return marked_count_; }
void increment_promoted_objects_size(int object_size) {
promoted_objects_size_ += object_size;
}
@ -2236,6 +2146,23 @@ class GCTracer BASE_EMBEDDED {
// A count (including this one) of the number of full garbage collections.
int full_gc_count_;
// True if the current GC is a compacting full collection, false
// otherwise.
bool is_compacting_;
// True if the *previous* full GC was a compacting collection (will be
// false if there has not been a previous full GC).
bool previous_has_compacted_;
// On a full GC, a count of the number of marked objects. Incremented
// when an object is marked and decremented when an object's mark bit is
// cleared. Will be zero on a scavenge collection.
int marked_count_;
// The count from the end of the previous full GC. Will be zero if there
// was no previous full GC.
int previous_marked_count_;
// Amounts of time spent in different scopes during GC.
double scopes_[Scope::kNumberOfScopes];
@ -2254,13 +2181,6 @@ class GCTracer BASE_EMBEDDED {
// Size of objects promoted during the current collection.
intptr_t promoted_objects_size_;
// Incremental marking steps counters.
int steps_count_;
double steps_took_;
double longest_step_;
int steps_count_since_last_gc_;
double steps_took_since_last_gc_;
Heap* heap_;
};
@ -2372,46 +2292,6 @@ class WeakObjectRetainer {
};
// Intrusive object marking uses least significant bit of
// heap object's map word to mark objects.
// Normally all map words have least significant bit set
// because they contain tagged map pointer.
// If the bit is not set object is marked.
// All objects should be unmarked before resuming
// JavaScript execution.
class IntrusiveMarking {
public:
static bool IsMarked(HeapObject* object) {
return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
}
static void ClearMark(HeapObject* object) {
uintptr_t map_word = object->map_word().ToRawValue();
object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
ASSERT(!IsMarked(object));
}
static void SetMark(HeapObject* object) {
uintptr_t map_word = object->map_word().ToRawValue();
object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
ASSERT(IsMarked(object));
}
static Map* MapOfMarkedObject(HeapObject* object) {
uintptr_t map_word = object->map_word().ToRawValue();
return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
}
static int SizeOfMarkedObject(HeapObject* object) {
return object->SizeFromMap(MapOfMarkedObject(object));
}
private:
static const uintptr_t kNotMarkedBit = 0x1;
STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
};
#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
@ -2470,6 +2350,7 @@ class PathTracer : public ObjectVisitor {
};
#endif // DEBUG || LIVE_OBJECT_LIST
} } // namespace v8::internal
#undef HEAP
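
The removed IntrusiveMarking class above marks an object by clearing the least significant bit of its map word: a tagged map pointer always has that bit set, so a cleared bit can only mean "marked", and the mark needs no extra storage. A standalone sketch of the bit trick with a stand-in FakeObject type (an assumption, not V8 code):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kNotMarkedBit = 0x1;

// Stand-in for a heap object's first word: normally a tagged map pointer,
// so the low bit is 1 for every unmarked object.
struct FakeObject {
  uintptr_t map_word;
};

static bool IsMarked(const FakeObject& o) {
  return (o.map_word & kNotMarkedBit) == 0;
}
static void SetMark(FakeObject* o) { o->map_word &= ~kNotMarkedBit; }
static void ClearMark(FakeObject* o) { o->map_word |= kNotMarkedBit; }

int main() {
  FakeObject o{0x1001};  // pretend tagged map pointer, low bit set
  assert(!IsMarked(o));
  SetMark(&o);
  assert(IsMarked(o));
  ClearMark(&o);  // all objects must be unmarked before JavaScript resumes
  assert(o.map_word == 0x1001);
}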

66
deps/v8/src/hydrogen-instructions.cc

@ -707,14 +707,6 @@ void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
}
void HIsNilAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(kind() == kStrictEquality ? " === " : " == ");
stream->Add(nil() == kNullValue ? "null" : "undefined");
HControlInstruction::PrintDataTo(stream);
}
void HReturn::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
@ -785,22 +777,15 @@ void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" == ");
stream->Add(type_literal_->GetFlatContent().ToAsciiVector());
HControlInstruction::PrintDataTo(stream);
}
void HTypeof::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
void HChange::PrintDataTo(StringStream* stream) {
HUnaryOperation::PrintDataTo(stream);
stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());
stream->Add(" %s to %s", from_.Mnemonic(), to().Mnemonic());
if (CanTruncateToInt32()) stream->Add(" truncating-int32");
if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
if (CheckFlag(kDeoptimizeOnUndefined)) stream->Add(" deopt-on-undefined");
}
@ -872,23 +857,6 @@ void HCheckFunction::PrintDataTo(StringStream* stream) {
}
const char* HCheckInstanceType::GetCheckName() {
switch (check_) {
case IS_SPEC_OBJECT: return "object";
case IS_JS_ARRAY: return "array";
case IS_STRING: return "string";
case IS_SYMBOL: return "symbol";
}
UNREACHABLE();
return "";
}
void HCheckInstanceType::PrintDataTo(StringStream* stream) {
stream->Add("%s ", GetCheckName());
HUnaryOperation::PrintDataTo(stream);
}
void HCallStub::PrintDataTo(StringStream* stream) {
stream->Add("%s ",
CodeStub::MajorName(major_key_, false));
@ -1343,14 +1311,6 @@ void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
}
void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
left()->PrintNameTo(stream);
stream->Add(" ");
right()->PrintNameTo(stream);
HControlInstruction::PrintDataTo(stream);
}
void HGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", SuccessorAt(0)->block_id());
}
@ -1465,7 +1425,7 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
}
bool HLoadKeyedFastElement::RequiresHoleCheck() {
bool HLoadKeyedFastElement::RequiresHoleCheck() const {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (!use->IsChange()) return true;
@ -1482,6 +1442,11 @@ void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
}
bool HLoadKeyedFastDoubleElement::RequiresHoleCheck() const {
return true;
}
void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
@ -1523,7 +1488,6 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo(
stream->Add("pixel");
break;
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -1618,7 +1582,6 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
case EXTERNAL_PIXEL_ELEMENTS:
stream->Add("pixel");
break;
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@ -1635,18 +1598,7 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
stream->Add("[%p]", *cell());
if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
if (details_.IsReadOnly()) stream->Add(" (read-only)");
}
bool HLoadGlobalCell::RequiresHoleCheck() {
if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (!use->IsChange()) return true;
}
return false;
if (check_hole_value()) stream->Add(" (deleteable/read-only)");
}
@ -1658,8 +1610,6 @@ void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
stream->Add("[%p] = ", *cell());
value()->PrintNameTo(stream);
if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
if (details_.IsReadOnly()) stream->Add(" (read-only)");
}

338
deps/v8/src/hydrogen-instructions.h

File diff suppressed because it is too large

403
deps/v8/src/hydrogen.cc

@ -422,7 +422,7 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
};
void HGraph::Verify(bool do_full_verify) const {
void HGraph::Verify() const {
for (int i = 0; i < blocks_.length(); i++) {
HBasicBlock* block = blocks_.at(i);
@ -473,7 +473,6 @@ void HGraph::Verify(bool do_full_verify) const {
// Check special property of first block to have no predecessors.
ASSERT(blocks_.at(0)->predecessors()->is_empty());
if (do_full_verify) {
// Check that the graph is fully connected.
ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
ASSERT(analyzer.visited_count() == blocks_.length());
@ -495,7 +494,6 @@ void HGraph::Verify(bool do_full_verify) const {
ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
}
}
}
}
#endif
@ -852,7 +850,7 @@ void HGraph::EliminateUnreachablePhis() {
}
bool HGraph::CheckArgumentsPhiUses() {
bool HGraph::CheckPhis() {
int block_count = blocks_.length();
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
@ -865,11 +863,13 @@ bool HGraph::CheckArgumentsPhiUses() {
}
bool HGraph::CheckConstPhiUses() {
bool HGraph::CollectPhis() {
int block_count = blocks_.length();
phi_list_ = new ZoneList<HPhi*>(block_count);
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
HPhi* phi = blocks_[i]->phis()->at(j);
phi_list_->Add(phi);
// Check for the hole value (from an uninitialized const).
for (int k = 0; k < phi->OperandCount(); k++) {
if (phi->OperandAt(k) == GetConstantHole()) return false;
@ -880,18 +880,6 @@ bool HGraph::CheckConstPhiUses() {
}
void HGraph::CollectPhis() {
int block_count = blocks_.length();
phi_list_ = new ZoneList<HPhi*>(block_count);
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
HPhi* phi = blocks_[i]->phis()->at(j);
phi_list_->Add(phi);
}
}
}
void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
BitVector in_worklist(GetMaximumValueID());
for (int i = 0; i < worklist->length(); ++i) {
@ -1499,6 +1487,9 @@ int HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
block->block_id() < dominated->block_id() &&
visited_on_paths_.Add(block->block_id())) {
side_effects |= block_side_effects_[block->block_id()];
if (block->IsLoopHeader()) {
side_effects |= loop_side_effects_[block->block_id()];
}
side_effects |= CollectSideEffectsOnPathsToDominatedBlock(
dominator, block);
}
@ -1860,7 +1851,7 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
}
if (new_value == NULL) {
new_value = new(zone()) HChange(value, to,
new_value = new(zone()) HChange(value, value->representation(), to,
is_truncating, deoptimize_on_undefined);
}
@ -2311,7 +2302,7 @@ HGraph* HGraphBuilder::CreateGraph() {
// Handle implicit declaration of the function name in named function
// expressions before other declarations.
if (scope->is_function_scope() && scope->function() != NULL) {
HandleDeclaration(scope->function(), CONST, NULL);
HandleDeclaration(scope->function(), Variable::CONST, NULL);
}
VisitDeclarations(scope->declarations());
AddSimulate(AstNode::kDeclarationsId);
@ -2332,24 +2323,17 @@ HGraph* HGraphBuilder::CreateGraph() {
graph()->OrderBlocks();
graph()->AssignDominators();
#ifdef DEBUG
// Do a full verify after building the graph and computing dominators.
graph()->Verify(true);
#endif
graph()->PropagateDeoptimizingMark();
if (!graph()->CheckConstPhiUses()) {
Bailout("Unsupported phi use of const variable");
return NULL;
}
graph()->EliminateRedundantPhis();
if (!graph()->CheckArgumentsPhiUses()) {
Bailout("Unsupported phi use of arguments");
if (!graph()->CheckPhis()) {
Bailout("Unsupported phi use of arguments object");
return NULL;
}
if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
graph()->CollectPhis();
if (!graph()->CollectPhis()) {
Bailout("Unsupported phi use of uninitialized constant");
return NULL;
}
HInferRepresentation rep(graph());
rep.Analyze();
@ -3141,21 +3125,11 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Variable* variable = expr->var();
if (variable->mode() == LET) {
if (variable->mode() == Variable::LET) {
return Bailout("reference to let variable");
}
switch (variable->location()) {
case Variable::UNALLOCATED: {
// Handle known global constants like 'undefined' specially to avoid a
// load from a global cell for them.
Handle<Object> constant_value =
isolate()->factory()->GlobalConstantFor(variable->name());
if (!constant_value.is_null()) {
HConstant* instr =
new(zone()) HConstant(constant_value, Representation::Tagged());
return ast_context()->ReturnInstruction(instr, expr->id());
}
LookupResult lookup;
GlobalPropertyAccess type =
LookupGlobalProperty(variable, &lookup, false);
@ -3168,8 +3142,8 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
if (type == kUseCell) {
Handle<GlobalObject> global(info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
HLoadGlobalCell* instr =
new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
return ast_context()->ReturnInstruction(instr, expr->id());
} else {
HValue* context = environment()->LookupContext();
@ -3188,7 +3162,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
case Variable::PARAMETER:
case Variable::LOCAL: {
HValue* value = environment()->Lookup(variable);
if (variable->mode() == CONST &&
if (variable->mode() == Variable::CONST &&
value == graph()->GetConstantHole()) {
return Bailout("reference to uninitialized const variable");
}
@ -3196,7 +3170,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
}
case Variable::CONTEXT: {
if (variable->mode() == CONST) {
if (variable->mode() == Variable::CONST) {
return Bailout("reference to const context slot");
}
HValue* context = BuildContextChainWalk(variable);
@ -3346,43 +3320,7 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* key = AddInstruction(
new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
Representation::Integer32()));
HInstruction* elements_kind =
AddInstruction(new(zone()) HElementsKind(literal));
HBasicBlock* store_fast = graph()->CreateBasicBlock();
// Two empty blocks to satisfy edge split form.
HBasicBlock* store_fast_edgesplit1 = graph()->CreateBasicBlock();
HBasicBlock* store_fast_edgesplit2 = graph()->CreateBasicBlock();
HBasicBlock* store_generic = graph()->CreateBasicBlock();
HBasicBlock* check_smi_only_elements = graph()->CreateBasicBlock();
HBasicBlock* join = graph()->CreateBasicBlock();
HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(value);
smicheck->SetSuccessorAt(0, store_fast_edgesplit1);
smicheck->SetSuccessorAt(1, check_smi_only_elements);
current_block()->Finish(smicheck);
store_fast_edgesplit1->Finish(new(zone()) HGoto(store_fast));
set_current_block(check_smi_only_elements);
HCompareConstantEqAndBranch* smi_elements_check =
new(zone()) HCompareConstantEqAndBranch(elements_kind,
FAST_SMI_ONLY_ELEMENTS,
Token::EQ_STRICT);
smi_elements_check->SetSuccessorAt(0, store_generic);
smi_elements_check->SetSuccessorAt(1, store_fast_edgesplit2);
current_block()->Finish(smi_elements_check);
store_fast_edgesplit2->Finish(new(zone()) HGoto(store_fast));
set_current_block(store_fast);
AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
store_fast->Goto(join);
set_current_block(store_generic);
AddInstruction(BuildStoreKeyedGeneric(literal, key, value));
store_generic->Goto(join);
join->SetJoinId(expr->id());
set_current_block(join);
AddSimulate(expr->GetIdForElement(i));
}
return ast_context()->ReturnValue(Pop());
@ -3626,10 +3564,10 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
LookupResult lookup;
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
Handle<GlobalObject> global(info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
HInstruction* instr =
new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
HInstruction* instr = new(zone()) HStoreGlobalCell(value, cell, check_hole);
instr->set_position(position);
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(ast_id);
@ -3663,7 +3601,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
if (proxy != NULL) {
Variable* var = proxy->var();
if (var->mode() == CONST || var->mode() == LET) {
if (var->mode() == Variable::CONST || var->mode() == Variable::LET) {
return Bailout("unsupported let or const compound assignment");
}
@ -3808,7 +3746,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
HandlePropertyAssignment(expr);
} else if (proxy != NULL) {
Variable* var = proxy->var();
if (var->mode() == CONST) {
if (var->mode() == Variable::CONST) {
if (expr->op() != Token::INIT_CONST) {
return Bailout("non-initializer assignment to const");
}
@ -3819,7 +3757,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
// variables (e.g. initialization inside a loop).
HValue* old_value = environment()->Lookup(var);
AddInstruction(new HUseConst(old_value));
} else if (var->mode() == LET) {
} else if (var->mode() == Variable::LET) {
return Bailout("unsupported assignment to let");
}
@ -3847,7 +3785,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
}
case Variable::CONTEXT: {
ASSERT(var->mode() != CONST);
ASSERT(var->mode() != Variable::CONST);
// Bail out if we try to mutate a parameter value in a function using
// the arguments object. We do not (yet) correctly handle the
// arguments property of the function.
@ -3993,7 +3931,6 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
break;
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@ -4010,30 +3947,6 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
}
HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
HValue* checked_key,
HValue* val,
ElementsKind elements_kind,
bool is_store) {
if (is_store) {
ASSERT(val != NULL);
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
return new(zone()) HStoreKeyedFastDoubleElement(
elements, checked_key, val);
} else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
return new(zone()) HStoreKeyedFastElement(
elements, checked_key, val, elements_kind);
}
}
// It's an element load (!is_store).
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
} else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
return new(zone()) HLoadKeyedFastElement(elements, checked_key);
}
}
HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
@ -4041,20 +3954,17 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
bool is_store) {
ASSERT(expr->IsMonomorphic());
Handle<Map> map = expr->GetMonomorphicReceiverType();
AddInstruction(new(zone()) HCheckNonSmi(object));
HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
bool fast_smi_only_elements = map->has_fast_smi_only_elements();
bool fast_elements = map->has_fast_elements();
bool fast_double_elements = map->has_fast_double_elements();
if (!fast_smi_only_elements &&
!fast_elements &&
!fast_double_elements &&
if (!map->has_fast_elements() &&
!map->has_fast_double_elements() &&
!map->has_external_array_elements()) {
return is_store ? BuildStoreKeyedGeneric(object, key, val)
: BuildLoadKeyedGeneric(object, key);
}
AddInstruction(new(zone()) HCheckNonSmi(object));
HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
if (is_store && (fast_elements || fast_smi_only_elements)) {
bool fast_double_elements = map->has_fast_double_elements();
if (is_store && map->has_fast_elements()) {
AddInstruction(new(zone()) HCheckMap(
elements, isolate()->factory()->fixed_array_map()));
}
@ -4069,15 +3979,28 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
return BuildExternalArrayElementAccess(external_elements, checked_key,
val, map->elements_kind(), is_store);
}
ASSERT(fast_smi_only_elements || fast_elements || fast_double_elements);
ASSERT(map->has_fast_elements() || fast_double_elements);
if (map->instance_type() == JS_ARRAY_TYPE) {
length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
} else {
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
}
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
return BuildFastElementAccess(elements, checked_key, val,
map->elements_kind(), is_store);
if (is_store) {
if (fast_double_elements) {
return new(zone()) HStoreKeyedFastDoubleElement(elements,
checked_key,
val);
} else {
return new(zone()) HStoreKeyedFastElement(elements, checked_key, val);
}
} else {
if (fast_double_elements) {
return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
} else {
return new(zone()) HLoadKeyedFastElement(elements, checked_key);
}
}
}
@ -4119,20 +4042,14 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL;
// Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
// FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external
// arrays.
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
// FAST_ELEMENTS is assumed to be the first case.
STATIC_ASSERT(FAST_ELEMENTS == 0);
for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
for (ElementsKind elements_kind = FAST_ELEMENTS;
elements_kind <= LAST_ELEMENTS_KIND;
elements_kind = ElementsKind(elements_kind + 1)) {
// After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS,
// FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code
// that's executed for all external array cases.
// After having handled FAST_ELEMENTS and DICTIONARY_ELEMENTS, we
// need to add some code that's executed for all external array cases.
STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
LAST_ELEMENTS_KIND);
if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
@ -4154,25 +4071,15 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_true);
HInstruction* access;
if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
elements_kind == FAST_ELEMENTS ||
if (elements_kind == FAST_ELEMENTS ||
elements_kind == FAST_DOUBLE_ELEMENTS) {
if (is_store && elements_kind == FAST_SMI_ONLY_ELEMENTS) {
AddInstruction(new(zone()) HCheckSmi(val));
}
if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
bool fast_double_elements =
elements_kind == FAST_DOUBLE_ELEMENTS;
if (is_store && elements_kind == FAST_ELEMENTS) {
AddInstruction(new(zone()) HCheckMap(
elements, isolate()->factory()->fixed_array_map(),
elements_kind_branch));
}
// TODO(jkummerow): The need for these two blocks could be avoided
// in one of two ways:
// (1) Introduce ElementsKinds for JSArrays that are distinct from
// those for fast objects.
// (2) Put the common instructions into a third "join" block. This
// requires additional AST IDs that we can deopt to from inside
// that join block. They must be added to the Property class (when
// it's a keyed property) and registered in the full codegen.
HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
HHasInstanceTypeAndBranch* typecheck =
@ -4182,15 +4089,29 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
current_block()->Finish(typecheck);
set_current_block(if_jsarray);
HInstruction* length;
length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck));
HInstruction* length = new(zone()) HJSArrayLength(object, typecheck);
AddInstruction(length);
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
access = AddInstruction(BuildFastElementAccess(
elements, checked_key, val, elements_kind, is_store));
if (!is_store) {
if (is_store) {
if (fast_double_elements) {
access = AddInstruction(
new(zone()) HStoreKeyedFastDoubleElement(elements,
checked_key,
val));
} else {
access = AddInstruction(
new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
}
} else {
if (fast_double_elements) {
access = AddInstruction(
new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
} else {
access = AddInstruction(
new(zone()) HLoadKeyedFastElement(elements, checked_key));
}
Push(access);
}
*has_side_effects |= access->HasSideEffects();
if (position != -1) {
access->set_position(position);
@ -4200,8 +4121,25 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_fastobject);
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
access = AddInstruction(BuildFastElementAccess(
elements, checked_key, val, elements_kind, is_store));
if (is_store) {
if (fast_double_elements) {
access = AddInstruction(
new(zone()) HStoreKeyedFastDoubleElement(elements,
checked_key,
val));
} else {
access = AddInstruction(
new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
}
} else {
if (fast_double_elements) {
access = AddInstruction(
new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
} else {
access = AddInstruction(
new(zone()) HLoadKeyedFastElement(elements, checked_key));
}
}
} else if (elements_kind == DICTIONARY_ELEMENTS) {
if (is_store) {
access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
@ -4539,25 +4477,20 @@ bool HGraphBuilder::TryInline(Call* expr) {
return false;
}
// No context change required.
CompilationInfo* outer_info = info();
#if !defined(V8_TARGET_ARCH_IA32)
// Target must be able to use caller's context.
if (target->context() != outer_info->closure()->context() ||
outer_info->scope()->contains_with() ||
outer_info->scope()->num_heap_slots() > 0) {
TraceInline(target, caller, "target requires context change");
return false;
}
#endif
// Don't inline deeper than kMaxInliningLevels calls.
HEnvironment* env = environment();
int current_level = 1;
while (env->outer() != NULL) {
if (current_level == (FLAG_limit_inlining
? Compiler::kMaxInliningLevels
: 2 * Compiler::kMaxInliningLevels)) {
if (current_level == Compiler::kMaxInliningLevels) {
TraceInline(target, caller, "inline depth limit reached");
return false;
}
@ -4663,8 +4596,7 @@ bool HGraphBuilder::TryInline(Call* expr) {
ASSERT(target_shared->has_deoptimization_support());
TypeFeedbackOracle target_oracle(
Handle<Code>(target_shared->code()),
Handle<Context>(target->context()->global_context()),
isolate());
Handle<Context>(target->context()->global_context()));
FunctionState target_state(this, &target_info, &target_oracle);
HConstant* undefined = graph()->GetConstantUndefined();
@ -4673,17 +4605,6 @@ bool HGraphBuilder::TryInline(Call* expr) {
function,
undefined,
call_kind);
#ifdef V8_TARGET_ARCH_IA32
// IA32 only, overwrite the caller's context in the deoptimization
// environment with the correct one.
//
// TODO(kmillikin): implement the same inlining on other platforms so we
// can remove the unsightly ifdefs in this function.
HConstant* context = new HConstant(Handle<Context>(target->context()),
Representation::Tagged());
AddInstruction(context);
inner_env->BindContext(context);
#endif
HBasicBlock* body_entry = CreateBasicBlock(inner_env);
current_block()->Goto(body_entry);
body_entry->SetJoinId(expr->ReturnId());
@ -5004,8 +4925,8 @@ void HGraphBuilder::VisitCall(Call* expr) {
}
} else {
expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
VariableProxy* proxy = expr->expression()->AsVariableProxy();
// FIXME.
bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
if (global_call) {
@ -5057,46 +4978,6 @@ void HGraphBuilder::VisitCall(Call* expr) {
Drop(argument_count);
}
} else if (expr->IsMonomorphic()) {
// The function is on the stack in the unoptimized code during
// evaluation of the arguments.
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
HValue* context = environment()->LookupContext();
HGlobalObject* global = new(zone()) HGlobalObject(context);
HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
AddInstruction(global);
PushAndAdd(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
if (TryInline(expr)) {
// The function is lingering in the deoptimization environment.
// Handle it by case analysis on the AST context.
if (ast_context()->IsEffect()) {
Drop(1);
} else if (ast_context()->IsValue()) {
HValue* result = Pop();
Drop(1);
Push(result);
} else if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
if (context->if_true()->HasPredecessor()) {
context->if_true()->last_environment()->Drop(1);
}
if (context->if_false()->HasPredecessor()) {
context->if_true()->last_environment()->Drop(1);
}
} else {
UNREACHABLE();
}
return;
} else {
call = PreProcessCall(new(zone()) HInvokeFunction(context,
function,
argument_count));
Drop(1); // The function.
}
} else {
CHECK_ALIVE(VisitArgument(expr->expression()));
HValue* context = environment()->LookupContext();
@ -5403,7 +5284,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
if (proxy != NULL) {
Variable* var = proxy->var();
if (var->mode() == CONST) {
if (var->mode() == Variable::CONST) {
return Bailout("unsupported count operation with const");
}
// Argument of the count operation is a variable, not a property.
@ -5790,36 +5671,26 @@ Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
}
void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
Expression* sub_expr,
void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* compare_expr,
Expression* expr,
Handle<String> check) {
CHECK_ALIVE(VisitForTypeOf(sub_expr));
HValue* value = Pop();
HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
instr->set_position(expr->position());
return ast_context()->ReturnControl(instr, expr->id());
CHECK_ALIVE(VisitForTypeOf(expr));
HValue* expr_value = Pop();
HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(expr_value, check);
instr->set_position(compare_expr->position());
return ast_context()->ReturnControl(instr, compare_expr->id());
}
bool HGraphBuilder::TryLiteralCompare(CompareOperation* expr) {
Expression *sub_expr;
Handle<String> check;
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
HandleLiteralCompareTypeof(expr, sub_expr, check);
return true;
}
if (expr->IsLiteralCompareUndefined(&sub_expr)) {
HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
return true;
}
if (expr->IsLiteralCompareNull(&sub_expr)) {
HandleLiteralCompareNil(expr, sub_expr, kNullValue);
return true;
}
return false;
void HGraphBuilder::HandleLiteralCompareUndefined(
CompareOperation* compare_expr, Expression* expr) {
CHECK_ALIVE(VisitForValue(expr));
HValue* lhs = Pop();
HValue* rhs = graph()->GetConstantUndefined();
HCompareObjectEqAndBranch* instr =
new(zone()) HCompareObjectEqAndBranch(lhs, rhs);
instr->set_position(compare_expr->position());
return ast_context()->ReturnControl(instr, compare_expr->id());
}
@ -5841,7 +5712,17 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
}
// Check for special cases that compare against literals.
if (TryLiteralCompare(expr)) return;
Expression *sub_expr;
Handle<String> check;
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
HandleLiteralCompareTypeof(expr, sub_expr, check);
return;
}
if (expr->IsLiteralCompareUndefined(&sub_expr)) {
HandleLiteralCompareUndefined(expr, sub_expr);
return;
}
TypeInfo type_info = oracle()->CompareType(expr);
// Check if this expression was ever executed according to type feedback.
@ -5946,18 +5827,14 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
}
void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil) {
void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
CHECK_ALIVE(VisitForValue(sub_expr));
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
EqualityKind kind =
expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality;
HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil);
instr->set_position(expr->position());
HIsNullAndBranch* instr =
new(zone()) HIsNullAndBranch(value, expr->is_strict());
return ast_context()->ReturnControl(instr, expr->id());
}
@ -5977,9 +5854,9 @@ void HGraphBuilder::VisitDeclaration(Declaration* decl) {
void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
VariableMode mode,
Variable::Mode mode,
FunctionLiteral* function) {
if (mode == LET) return Bailout("unsupported let declaration");
if (mode == Variable::LET) return Bailout("unsupported let declaration");
Variable* var = proxy->var();
switch (var->location()) {
case Variable::UNALLOCATED:
@ -5987,9 +5864,9 @@ void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT:
if (mode == CONST || function != NULL) {
if (mode == Variable::CONST || function != NULL) {
HValue* value = NULL;
if (mode == CONST) {
if (mode == Variable::CONST) {
value = graph()->GetConstantHole();
} else {
VisitForValue(function);
@ -6040,7 +5917,9 @@ void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HHasInstanceTypeAndBranch* result =
new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE);
new(zone()) HHasInstanceTypeAndBranch(value,
JS_FUNCTION_TYPE,
JS_FUNCTION_PROXY_TYPE);
return ast_context()->ReturnControl(result, call->id());
}
@ -6940,7 +6819,7 @@ void HPhase::End() const {
}
#ifdef DEBUG
if (graph_ != NULL) graph_->Verify(false); // No full verify.
if (graph_ != NULL) graph_->Verify();
if (allocator_ != NULL) allocator_->Verify();
#endif
}

29
deps/v8/src/hydrogen.h

@ -243,13 +243,11 @@ class HGraph: public ZoneObject {
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
bool CheckArgumentsPhiUses();
bool CheckPhis();
// Returns false if there are phi-uses of an uninitialized const
// which are not supported by the optimizing compiler.
bool CheckConstPhiUses();
void CollectPhis();
// Returns false if there are phi-uses of hole values coming
// from uninitialized consts.
bool CollectPhis();
Handle<Code> Compile(CompilationInfo* info);
@ -285,7 +283,7 @@ class HGraph: public ZoneObject {
}
#ifdef DEBUG
void Verify(bool do_full_verify) const;
void Verify() const;
#endif
private:
@ -782,7 +780,7 @@ class HGraphBuilder: public AstVisitor {
#undef INLINE_FUNCTION_GENERATOR_DECLARATION
void HandleDeclaration(VariableProxy* proxy,
VariableMode mode,
Variable::Mode mode,
FunctionLiteral* function);
void VisitDelete(UnaryOperation* expr);
@ -912,13 +910,11 @@ class HGraphBuilder: public AstVisitor {
HValue* receiver,
SmallMapList* types,
Handle<String> name);
bool TryLiteralCompare(CompareOperation* expr);
void HandleLiteralCompareTypeof(CompareOperation* expr,
Expression* sub_expr,
void HandleLiteralCompareTypeof(CompareOperation* compare_expr,
Expression* expr,
Handle<String> check);
void HandleLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil);
void HandleLiteralCompareUndefined(CompareOperation* compare_expr,
Expression* expr);
HStringCharCodeAt* BuildStringCharCodeAt(HValue* context,
HValue* string,
@ -942,11 +938,6 @@ class HGraphBuilder: public AstVisitor {
HValue* val,
ElementsKind elements_kind,
bool is_store);
HInstruction* BuildFastElementAccess(HValue* elements,
HValue* checked_key,
HValue* val,
ElementsKind elements_kind,
bool is_store);
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,

26
deps/v8/src/ia32/assembler-ia32-inl.h

@ -89,13 +89,8 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target) {
Assembler::set_target_address_at(pc_, target);
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
if (host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
Assembler::set_target_address_at(pc_, target);
}
@ -121,10 +116,6 @@ void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address));
if (host() != NULL && target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), &Memory::Object_at(pc_), HeapObject::cast(target));
}
}
@ -156,12 +147,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
if (host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), NULL, cell);
}
}
@ -176,11 +161,6 @@ void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Assembler::set_target_address_at(pc_ + 1, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
@ -214,7 +194,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
visitor->VisitPointer(target_object_address());
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
@ -242,7 +222,7 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
StaticVisitor::VisitPointer(heap, target_object_address());
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);

83
deps/v8/src/ia32/assembler-ia32.cc

@ -55,8 +55,6 @@ uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
// The Probe method needs executable memory, so it uses Heap::CreateCode.
// Allocation failure is silent and leads to safe default.
void CpuFeatures::Probe() {
ASSERT(!initialized_);
ASSERT(supported_ == 0);
@ -88,23 +86,23 @@ void CpuFeatures::Probe() {
__ pushfd();
__ push(ecx);
__ push(ebx);
__ mov(ebp, esp);
__ mov(ebp, Operand(esp));
// If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
__ pushfd();
__ pop(eax);
__ mov(edx, eax);
__ mov(edx, Operand(eax));
__ xor_(eax, 0x200000); // Flip bit 21.
__ push(eax);
__ popfd();
__ pushfd();
__ pop(eax);
__ xor_(eax, edx); // Different if CPUID is supported.
__ xor_(eax, Operand(edx)); // Different if CPUID is supported.
__ j(not_zero, &cpuid);
// CPUID not supported. Clear the supported features in edx:eax.
__ xor_(eax, eax);
__ xor_(edx, edx);
__ xor_(eax, Operand(eax));
__ xor_(edx, Operand(edx));
__ jmp(&done);
// Invoke CPUID with 1 in eax to get feature information in
@ -120,13 +118,13 @@ void CpuFeatures::Probe() {
// Move the result from ecx:edx to edx:eax and make sure to mark the
// CPUID feature as supported.
__ mov(eax, edx);
__ mov(eax, Operand(edx));
__ or_(eax, 1 << CPUID);
__ mov(edx, ecx);
__ mov(edx, Operand(ecx));
// Done.
__ bind(&done);
__ mov(esp, ebp);
__ mov(esp, Operand(ebp));
__ pop(ebx);
__ pop(ecx);
__ popfd();
@ -288,18 +286,6 @@ bool Operand::is_reg(Register reg) const {
&& ((buf_[0] & 0x07) == reg.code()); // register codes match.
}
bool Operand::is_reg_only() const {
return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only.
}
Register Operand::reg() const {
ASSERT(is_reg_only());
return Register::from_code(buf_[0] & 0x07);
}
// -----------------------------------------------------------------------------
// Implementation of Assembler.
@ -715,13 +701,6 @@ void Assembler::add(Register dst, const Operand& src) {
}
void Assembler::add(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x01);
emit_operand(src, dst);
}
void Assembler::add(const Operand& dst, const Immediate& x) {
ASSERT(reloc_info_writer.last_pc() != NULL);
EnsureSpace ensure_space(this);
@ -762,29 +741,25 @@ void Assembler::and_(const Operand& dst, Register src) {
void Assembler::cmpb(const Operand& op, int8_t imm8) {
EnsureSpace ensure_space(this);
if (op.is_reg(eax)) {
EMIT(0x3C);
} else {
EMIT(0x80);
emit_operand(edi, op); // edi == 7
}
EMIT(imm8);
}
void Assembler::cmpb(const Operand& op, Register reg) {
ASSERT(reg.is_byte_register());
void Assembler::cmpb(const Operand& dst, Register src) {
ASSERT(src.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x38);
emit_operand(reg, op);
emit_operand(src, dst);
}
void Assembler::cmpb(Register reg, const Operand& op) {
ASSERT(reg.is_byte_register());
void Assembler::cmpb(Register dst, const Operand& src) {
ASSERT(dst.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x3A);
emit_operand(reg, op);
emit_operand(dst, src);
}
@ -1094,6 +1069,18 @@ void Assembler::shr_cl(Register dst) {
}
void Assembler::subb(const Operand& op, int8_t imm8) {
EnsureSpace ensure_space(this);
if (op.is_reg(eax)) {
EMIT(0x2c);
} else {
EMIT(0x80);
emit_operand(ebp, op); // ebp == 5
}
EMIT(imm8);
}
void Assembler::sub(const Operand& dst, const Immediate& x) {
EnsureSpace ensure_space(this);
emit_arith(5, dst, x);
@ -1107,6 +1094,14 @@ void Assembler::sub(Register dst, const Operand& src) {
}
void Assembler::subb(Register dst, const Operand& src) {
ASSERT(dst.code() < 4);
EnsureSpace ensure_space(this);
EMIT(0x2A);
emit_operand(dst, src);
}
void Assembler::sub(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x29);
@ -1163,10 +1158,6 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
void Assembler::test_b(const Operand& op, uint8_t imm8) {
if (op.is_reg_only() && op.reg().code() >= 4) {
test(op, Immediate(imm8));
return;
}
EnsureSpace ensure_space(this);
EMIT(0xF6);
emit_operand(eax, op);
@ -1187,10 +1178,10 @@ void Assembler::xor_(Register dst, const Operand& src) {
}
void Assembler::xor_(const Operand& dst, Register src) {
void Assembler::xor_(const Operand& src, Register dst) {
EnsureSpace ensure_space(this);
EMIT(0x31);
emit_operand(src, dst);
emit_operand(dst, src);
}
@ -2480,7 +2471,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
return;
}
}
RelocInfo rinfo(pc_, rmode, data, NULL);
RelocInfo rinfo(pc_, rmode, data);
reloc_info_writer.Write(&rinfo);
}

88
deps/v8/src/ia32/assembler-ia32.h

@ -75,8 +75,6 @@ struct Register {
static inline Register FromAllocationIndex(int index);
static Register from_code(int code) {
ASSERT(code >= 0);
ASSERT(code < kNumRegisters);
Register r = { code };
return r;
}
@ -302,6 +300,9 @@ enum ScaleFactor {
class Operand BASE_EMBEDDED {
public:
// reg
INLINE(explicit Operand(Register reg));
// XMM reg
INLINE(explicit Operand(XMMRegister xmm_reg));
@ -346,16 +347,12 @@ class Operand BASE_EMBEDDED {
// Returns true if this Operand is a wrapper for the specified register.
bool is_reg(Register reg) const;
// Returns true if this Operand is a wrapper for one register.
bool is_reg_only() const;
// Asserts that this Operand is a wrapper for one register and returns the
// register.
Register reg() const;
private:
// reg
INLINE(explicit Operand(Register reg));
byte buf_[6];
// The number of bytes in buf_.
unsigned int len_;
// Only valid if len_ > 4.
RelocInfo::Mode rmode_;
// Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
@ -365,15 +362,7 @@ class Operand BASE_EMBEDDED {
inline void set_disp8(int8_t disp);
inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
byte buf_[6];
// The number of bytes in buf_.
unsigned int len_;
// Only valid if len_ > 4.
RelocInfo::Mode rmode_;
friend class Assembler;
friend class MacroAssembler;
friend class LCodeGen;
};
@ -682,9 +671,7 @@ class Assembler : public AssemblerBase {
void leave();
// Moves
void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
void mov_b(Register dst, const Operand& src);
void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
void mov_b(const Operand& dst, int8_t imm8);
void mov_b(const Operand& dst, Register src);
@ -700,24 +687,17 @@ class Assembler : public AssemblerBase {
void mov(const Operand& dst, Handle<Object> handle);
void mov(const Operand& dst, Register src);
void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
void movsx_b(Register dst, const Operand& src);
void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
void movsx_w(Register dst, const Operand& src);
void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
void movzx_b(Register dst, const Operand& src);
void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
void movzx_w(Register dst, const Operand& src);
// Conditional moves
void cmov(Condition cc, Register dst, int32_t imm32);
void cmov(Condition cc, Register dst, Handle<Object> handle);
void cmov(Condition cc, Register dst, Register src) {
cmov(cc, dst, Operand(src));
}
void cmov(Condition cc, Register dst, const Operand& src);
// Flag management.
@ -735,31 +715,24 @@ class Assembler : public AssemblerBase {
void adc(Register dst, int32_t imm32);
void adc(Register dst, const Operand& src);
void add(Register dst, Register src) { add(dst, Operand(src)); }
void add(Register dst, const Operand& src);
void add(const Operand& dst, Register src);
void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
void add(const Operand& dst, const Immediate& x);
void and_(Register dst, int32_t imm32);
void and_(Register dst, const Immediate& x);
void and_(Register dst, Register src) { and_(dst, Operand(src)); }
void and_(Register dst, const Operand& src);
void and_(const Operand& dst, Register src);
void and_(const Operand& src, Register dst);
void and_(const Operand& dst, const Immediate& x);
void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
void cmpb(const Operand& op, int8_t imm8);
void cmpb(Register reg, const Operand& op);
void cmpb(const Operand& op, Register reg);
void cmpb(Register src, const Operand& dst);
void cmpb(const Operand& dst, Register src);
void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op);
void cmpw(const Operand& op, Immediate imm16);
void cmp(Register reg, int32_t imm32);
void cmp(Register reg, Handle<Object> handle);
void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
void cmp(Register reg, const Operand& op);
void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
void cmp(const Operand& op, const Immediate& imm);
void cmp(const Operand& op, Handle<Object> handle);
@ -775,7 +748,6 @@ class Assembler : public AssemblerBase {
// Signed multiply instructions.
void imul(Register src); // edx:eax = eax * src.
void imul(Register dst, Register src) { imul(dst, Operand(src)); }
void imul(Register dst, const Operand& src); // dst = dst * src.
void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
@ -792,10 +764,8 @@ class Assembler : public AssemblerBase {
void not_(Register dst);
void or_(Register dst, int32_t imm32);
void or_(Register dst, Register src) { or_(dst, Operand(src)); }
void or_(Register dst, const Operand& src);
void or_(const Operand& dst, Register src);
void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
void or_(const Operand& dst, const Immediate& x);
void rcl(Register dst, uint8_t imm8);
@ -806,42 +776,35 @@ class Assembler : public AssemblerBase {
void sbb(Register dst, const Operand& src);
void shld(Register dst, Register src) { shld(dst, Operand(src)); }
void shld(Register dst, const Operand& src);
void shl(Register dst, uint8_t imm8);
void shl_cl(Register dst);
void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
void shrd(Register dst, const Operand& src);
void shr(Register dst, uint8_t imm8);
void shr_cl(Register dst);
void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
void subb(const Operand& dst, int8_t imm8);
void subb(Register dst, const Operand& src);
void sub(const Operand& dst, const Immediate& x);
void sub(Register dst, Register src) { sub(dst, Operand(src)); }
void sub(Register dst, const Operand& src);
void sub(const Operand& dst, Register src);
void test(Register reg, const Immediate& imm);
void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32);
void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
void xor_(Register dst, const Operand& src);
void xor_(const Operand& dst, Register src);
void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
void xor_(const Operand& src, Register dst);
void xor_(const Operand& dst, const Immediate& x);
// Bit operations.
void bt(const Operand& dst, Register src);
void bts(Register dst, Register src) { bts(Operand(dst), src); }
void bts(const Operand& dst, Register src);
// Miscellaneous
@ -872,7 +835,6 @@ class Assembler : public AssemblerBase {
void call(Label* L);
void call(byte* entry, RelocInfo::Mode rmode);
int CallSize(const Operand& adr);
void call(Register reg) { call(Operand(reg)); }
void call(const Operand& adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code,
@ -883,7 +845,6 @@ class Assembler : public AssemblerBase {
// unconditional jump to L
void jmp(Label* L, Label::Distance distance = Label::kFar);
void jmp(byte* entry, RelocInfo::Mode rmode);
void jmp(Register reg) { jmp(Operand(reg)); }
void jmp(const Operand& adr);
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
@ -968,7 +929,6 @@ class Assembler : public AssemblerBase {
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
void cvtsi2sd(XMMRegister dst, const Operand& src);
void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtsd2ss(XMMRegister dst, XMMRegister src);
@ -1009,14 +969,12 @@ class Assembler : public AssemblerBase {
void movdbl(XMMRegister dst, const Operand& src);
void movdbl(const Operand& dst, XMMRegister src);
void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
void movd(XMMRegister dst, const Operand& src);
void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
void movd(const Operand& dst, XMMRegister src);
void movd(const Operand& src, XMMRegister dst);
void movsd(XMMRegister dst, XMMRegister src);
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
void movss(const Operand& src, XMMRegister dst);
void movss(XMMRegister dst, XMMRegister src);
void pand(XMMRegister dst, XMMRegister src);
@ -1029,17 +987,11 @@ class Assembler : public AssemblerBase {
void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
void pextrd(Register dst, XMMRegister src, int8_t offset) {
pextrd(Operand(dst), src, offset);
}
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
void pinsrd(XMMRegister dst, Register src, int8_t offset) {
pinsrd(dst, Operand(src), offset);
}
void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
// Parallel XMM operations.
void movntdqa(XMMRegister dst, const Operand& src);
void movntdqa(XMMRegister src, const Operand& dst);
void movntdq(const Operand& dst, XMMRegister src);
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
@ -1093,9 +1045,6 @@ class Assembler : public AssemblerBase {
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
protected:
bool emit_debug_code() const { return emit_debug_code_; }
@ -1108,8 +1057,9 @@ class Assembler : public AssemblerBase {
byte* addr_at(int pos) { return buffer_ + pos; }
private:
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
uint32_t long_at(int pos) {
return *reinterpret_cast<uint32_t*>(addr_at(pos));
}

251
deps/v8/src/ia32/builtins-ia32.cc

@ -69,7 +69,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// JumpToExternalReference expects eax to contain the number of arguments
// including the receiver and the extra arguments.
__ add(eax, Immediate(num_extra_args + 1));
__ add(Operand(eax), Immediate(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@ -80,34 +80,25 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- edi: constructor function
// -----------------------------------
Label slow, non_function_call;
Label non_function_call;
// Check that function is not a smi.
__ JumpIfSmi(edi, &non_function_call);
// Check that function is a JSFunction.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &slow);
__ j(not_equal, &non_function_call);
// Jump to the function-specific construct stub.
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
__ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
__ jmp(ebx);
__ jmp(Operand(ebx));
// edi: called object
// eax: number of arguments
// ecx: object map
Label do_call;
__ bind(&slow);
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
__ j(not_equal, &non_function_call);
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call);
__ bind(&non_function_call);
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
// Set expected number of arguments to zero (not changing eax).
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
Handle<Code> arguments_adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ SetCallKind(ecx, CALL_AS_METHOD);
@ -122,8 +113,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
ASSERT(!is_api_function || !count_constructions);
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
__ EnterConstructFrame();
// Store a smi-tagged arguments count on the stack.
__ SmiTag(eax);
@ -132,8 +122,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Push the function to invoke on the stack.
__ push(edi);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
// Try to allocate the object without transitioning into C code. If any of the
// preconditions is not met, the code bails out to the runtime call.
Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
@ -155,9 +145,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CmpObjectType(eax, MAP_TYPE, ebx);
__ j(not_equal, &rt_call);
// Check that the constructor is not constructing a JSFunction (see
// comments in Runtime_NewObject in runtime.cc). In which case the
// initial map's instance type would be JS_FUNCTION_TYPE.
// Check that the constructor is not constructing a JSFunction (see comments
// in Runtime_NewObject in runtime.cc). In which case the initial map's
// instance type would be JS_FUNCTION_TYPE.
// edi: constructor
// eax: initial map
__ CmpInstanceType(eax, JS_FUNCTION_TYPE);
@ -167,8 +157,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Label allocate;
// Decrease generous allocation count.
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ dec_b(FieldOperand(ecx,
SharedFunctionInfo::kConstructionCountOffset));
__ dec_b(FieldOperand(ecx, SharedFunctionInfo::kConstructionCountOffset));
__ j(not_zero, &allocate);
__ push(eax);
@ -189,8 +178,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// eax: initial map
__ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
__ shl(edi, kPointerSizeLog2);
__ AllocateInNewSpace(
edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
__ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
@ -204,32 +192,31 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// eax: initial map
// ebx: JSObject
// edi: start of next object
__ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
__ mov(edx, factory->undefined_value());
{ Label loop, entry;
// To allow for truncation.
if (count_constructions) {
__ movzx_b(esi,
FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
__ lea(esi,
Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
// esi: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
__ cmp(esi, edi);
__ Assert(less_equal,
"Unexpected number of pre-allocated property fields.");
}
__ InitializeFieldsWithFiller(ecx, esi, edx);
__ mov(edx, factory->one_pointer_filler_map());
} else {
__ mov(edx, factory->undefined_value());
}
__ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
__ mov(Operand(ecx, 0), edx);
__ add(Operand(ecx), Immediate(kPointerSize));
__ bind(&entry);
__ cmp(ecx, Operand(edi));
__ j(less, &loop);
}
__ InitializeFieldsWithFiller(ecx, edi, edx);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
// failures need to undo the allocation, so that the heap is in a
// consistent state and verifiable.
// Add the object tag to make the JSObject real, so that we can continue and
// jump into the continuation code at any time from now on. Any failures
// need to undo the allocation, so that the heap is in a consistent state
// and verifiable.
// eax: initial map
// ebx: JSObject
// edi: start of next object
__ or_(ebx, Immediate(kHeapObjectTag));
__ or_(Operand(ebx), Immediate(kHeapObjectTag));
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
@ -238,12 +225,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// edi: start of next object
// Calculate the total number of properties described by the map.
__ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
__ movzx_b(ecx,
FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
__ add(edx, ecx);
__ movzx_b(ecx, FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
__ add(edx, Operand(ecx));
// Calculate unused properties past the end of the in-object properties.
__ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
__ sub(edx, ecx);
__ sub(edx, Operand(ecx));
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
__ Assert(positive, "Property allocation count failed.");
@ -282,9 +268,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ jmp(&entry);
__ bind(&loop);
__ mov(Operand(eax, 0), edx);
__ add(eax, Immediate(kPointerSize));
__ add(Operand(eax), Immediate(kPointerSize));
__ bind(&entry);
__ cmp(eax, ecx);
__ cmp(eax, Operand(ecx));
__ j(below, &loop);
}
@ -292,7 +278,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// the JSObject
// ebx: JSObject
// edi: FixedArray
__ or_(edi, Immediate(kHeapObjectTag)); // add the heap tag
__ or_(Operand(edi), Immediate(kHeapObjectTag)); // add the heap tag
__ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
@ -315,7 +301,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// edi: function (constructor)
__ push(edi);
__ CallRuntime(Runtime::kNewObject, 1);
__ mov(ebx, eax); // store result in ebx
__ mov(ebx, Operand(eax)); // store result in ebx
// New object allocated.
// ebx: newly allocated object
@ -338,7 +324,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Copy arguments and receiver to the expression stack.
Label loop, entry;
__ mov(ecx, eax);
__ mov(ecx, Operand(eax));
__ jmp(&entry);
__ bind(&loop);
__ push(Operand(ebx, ecx, times_4, 0));
@ -383,10 +369,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Restore the arguments count and leave the construct frame.
__ bind(&exit);
__ mov(ebx, Operand(esp, kPointerSize)); // Get arguments count.
// Leave construct frame.
}
__ mov(ebx, Operand(esp, kPointerSize)); // get arguments count
__ LeaveConstructFrame();
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
@ -415,11 +399,11 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Clear the context before we push it when entering the internal frame.
// Clear the context before we push it when entering the JS frame.
__ Set(esi, Immediate(0));
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Enter an internal frame.
__ EnterInternalFrame();
// Load the previous frame pointer (ebx) to access C arguments
__ mov(ebx, Operand(ebp, 0));
@ -443,14 +427,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&loop);
__ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
__ push(Operand(edx, 0)); // dereference handle
__ inc(ecx);
__ inc(Operand(ecx));
__ bind(&entry);
__ cmp(ecx, eax);
__ cmp(ecx, Operand(eax));
__ j(not_equal, &loop);
// Get the function from the stack and call it.
// kPointerSize for the receiver.
__ mov(edi, Operand(esp, eax, times_4, kPointerSize));
__ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize)); // +1 ~ receiver
// Invoke the code.
if (is_construct) {
@ -462,11 +445,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
NullCallWrapper(), CALL_AS_METHOD);
}
// Exit the internal frame. Notice that this also removes the empty.
// Exit the JS frame. Notice that this also removes the empty
// context and the function left on the stack by the code
// invocation.
}
__ ret(kPointerSize); // Remove receiver.
__ LeaveInternalFrame();
__ ret(1 * kPointerSize); // remove receiver
}
@ -481,8 +464,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Enter an internal frame.
__ EnterInternalFrame();
// Push a copy of the function.
__ push(edi);
@ -497,18 +480,18 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Restore receiver.
__ pop(edi);
// Tear down internal frame.
}
// Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
__ jmp(eax);
__ jmp(Operand(eax));
}
void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Enter an internal frame.
__ EnterInternalFrame();
// Push a copy of the function onto the stack.
__ push(edi);
@ -523,26 +506,26 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Restore receiver.
__ pop(edi);
// Tear down internal frame.
}
// Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
__ jmp(eax);
__ jmp(Operand(eax));
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Enter an internal frame.
__ EnterInternalFrame();
// Pass the function and deoptimization type to the runtime system.
__ push(Immediate(Smi::FromInt(static_cast<int>(type))));
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
// Tear down internal frame.
}
// Tear down temporary frame.
__ LeaveInternalFrame();
// Get the full codegen state from the stack and untag it.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
@ -583,10 +566,9 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ pushad();
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ CallRuntime(Runtime::kNotifyOSR, 0);
}
__ LeaveInternalFrame();
__ popad();
__ ret(0);
}
@ -597,7 +579,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
{ Label done;
__ test(eax, eax);
__ test(eax, Operand(eax));
__ j(not_zero, &done);
__ pop(ebx);
__ push(Immediate(factory->undefined_value()));
@ -649,9 +631,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ j(above_equal, &shift_arguments);
__ bind(&convert_to_object);
{ // In order to preserve argument count.
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame(); // In order to preserve argument count.
__ SmiTag(eax);
__ push(eax);
@ -662,8 +642,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ pop(eax);
__ SmiUntag(eax);
}
__ LeaveInternalFrame();
// Restore the function to edi.
__ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
__ jmp(&patch_receiver);
@ -716,11 +695,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
// or a function proxy via CALL_FUNCTION_PROXY.
{ Label function, non_proxy;
__ test(edx, edx);
__ test(edx, Operand(edx));
__ j(zero, &function);
__ Set(ebx, Immediate(0));
__ SetCallKind(ecx, CALL_AS_METHOD);
__ cmp(edx, Immediate(1));
__ cmp(Operand(edx), Immediate(1));
__ j(not_equal, &non_proxy);
__ pop(edx); // return address
@ -747,13 +726,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ SmiUntag(ebx);
__ SetCallKind(ecx, CALL_AS_METHOD);
__ cmp(eax, ebx);
__ cmp(eax, Operand(ebx));
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
ParameterCount expected(0);
__ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(),
CALL_AS_METHOD);
__ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
}
@ -761,8 +740,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
static const int kArgumentsOffset = 2 * kPointerSize;
static const int kReceiverOffset = 3 * kPointerSize;
static const int kFunctionOffset = 4 * kPointerSize;
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ push(Operand(ebp, kFunctionOffset)); // push this
__ push(Operand(ebp, kArgumentsOffset)); // push arguments
@ -777,14 +756,14 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ mov(edi, Operand::StaticVariable(real_stack_limit));
// Make ecx the space we have left. The stack might already be overflowed
// here which will cause ecx to become negative.
__ mov(ecx, esp);
__ sub(ecx, edi);
__ mov(ecx, Operand(esp));
__ sub(ecx, Operand(edi));
// Make edx the space we need for the array when it is unrolled onto the
// stack.
__ mov(edx, eax);
__ mov(edx, Operand(eax));
__ shl(edx, kPointerSizeLog2 - kSmiTagSize);
// Check if the arguments will overflow the stack.
__ cmp(ecx, edx);
__ cmp(ecx, Operand(edx));
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
@ -843,7 +822,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ bind(&call_to_object);
__ push(ebx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ mov(ebx, eax);
__ mov(ebx, Operand(eax));
__ jmp(&push_receiver);
// Use the current global receiver object as the receiver.
@ -879,7 +858,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Update the index on the stack and in register eax.
__ mov(eax, Operand(ebp, kIndexOffset));
__ add(eax, Immediate(1 << kSmiTagSize));
__ add(Operand(eax), Immediate(1 << kSmiTagSize));
__ mov(Operand(ebp, kIndexOffset), eax);
__ bind(&entry);
@ -896,7 +875,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ InvokeFunction(edi, actual, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
frame_scope.GenerateLeaveFrame();
__ LeaveInternalFrame();
__ ret(3 * kPointerSize); // remove this, receiver, and arguments
// Invoke the function proxy.
@ -909,8 +888,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
// Leave internal frame.
}
__ LeaveInternalFrame();
__ ret(3 * kPointerSize); // remove this, receiver, and arguments
}
@ -1005,9 +983,9 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ jmp(&entry);
__ bind(&loop);
__ mov(Operand(scratch1, 0), factory->the_hole_value());
__ add(scratch1, Immediate(kPointerSize));
__ add(Operand(scratch1), Immediate(kPointerSize));
__ bind(&entry);
__ cmp(scratch1, scratch2);
__ cmp(scratch1, Operand(scratch2));
__ j(below, &loop);
}
}
@ -1104,7 +1082,7 @@ static void AllocateJSArray(MacroAssembler* masm,
__ bind(&loop);
__ stos();
__ bind(&entry);
__ cmp(edi, elements_array_end);
__ cmp(edi, Operand(elements_array_end));
__ j(below, &loop);
__ bind(&done);
}
@ -1142,7 +1120,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ push(eax);
// Check for array construction with zero arguments.
__ test(eax, eax);
__ test(eax, Operand(eax));
__ j(not_zero, &argc_one_or_more);
__ bind(&empty_array);
@ -1169,7 +1147,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ j(not_equal, &argc_two_or_more);
STATIC_ASSERT(kSmiTag == 0);
__ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
__ test(ecx, ecx);
__ test(ecx, Operand(ecx));
__ j(not_zero, &not_empty_array);
// The single argument passed is zero, so we jump to the code above used to
@ -1182,7 +1160,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ mov(eax, Operand(esp, i * kPointerSize));
__ mov(Operand(esp, (i + 1) * kPointerSize), eax);
}
__ add(esp, Immediate(2 * kPointerSize)); // Drop two stack slots.
__ add(Operand(esp), Immediate(2 * kPointerSize)); // Drop two stack slots.
__ push(Immediate(0)); // Treat this as a call with argc of zero.
__ jmp(&empty_array);
@ -1272,7 +1250,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ bind(&loop);
__ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
__ mov(Operand(edx, 0), eax);
__ add(edx, Immediate(kPointerSize));
__ add(Operand(edx), Immediate(kPointerSize));
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
@ -1378,14 +1356,14 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
__ cmp(edi, Operand(ecx));
__ Assert(equal, "Unexpected String function");
}
// Load the first argument into eax and get rid of the rest
// (including the receiver).
Label no_arguments;
__ test(eax, eax);
__ test(eax, Operand(eax));
__ j(zero, &no_arguments);
__ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
__ pop(ecx);
@ -1461,13 +1439,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Invoke the conversion builtin and put the result into ebx.
__ bind(&convert_argument);
__ IncrementCounter(counters->string_ctor_conversions(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ push(edi); // Preserve the function.
__ push(eax);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
__ pop(edi);
}
__ LeaveInternalFrame();
__ mov(ebx, eax);
__ jmp(&argument_is_string);
@ -1484,18 +1461,17 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// create a string wrapper.
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ push(ebx);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
__ LeaveInternalFrame();
__ ret(0);
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
__ mov(ebp, esp);
__ mov(ebp, Operand(esp));
// Store the arguments adaptor context sentinel.
__ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@ -1539,7 +1515,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
Label enough, too_few;
__ cmp(eax, ebx);
__ cmp(eax, Operand(ebx));
__ j(less, &too_few);
__ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
__ j(equal, &dont_adapt_arguments);
@ -1557,8 +1533,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&copy);
__ inc(edi);
__ push(Operand(eax, 0));
__ sub(eax, Immediate(kPointerSize));
__ cmp(edi, ebx);
__ sub(Operand(eax), Immediate(kPointerSize));
__ cmp(edi, Operand(ebx));
__ j(less, &copy);
__ jmp(&invoke);
}
@ -1571,17 +1547,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(edi, Operand(ebp, eax, times_4, offset));
// ebx = expected - actual.
__ sub(ebx, eax);
__ sub(ebx, Operand(eax));
// eax = -actual - 1
__ neg(eax);
__ sub(eax, Immediate(1));
__ sub(Operand(eax), Immediate(1));
Label copy;
__ bind(&copy);
__ inc(eax);
__ push(Operand(edi, 0));
__ sub(edi, Immediate(kPointerSize));
__ test(eax, eax);
__ sub(Operand(edi), Immediate(kPointerSize));
__ test(eax, Operand(eax));
__ j(not_zero, &copy);
// Fill remaining expected arguments with undefined values.
@ -1589,7 +1565,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&fill);
__ inc(eax);
__ push(Immediate(masm->isolate()->factory()->undefined_value()));
__ cmp(eax, ebx);
__ cmp(eax, Operand(ebx));
__ j(less, &fill);
}
@ -1597,7 +1573,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&invoke);
// Restore function pointer.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ call(edx);
__ call(Operand(edx));
// Leave frame and return.
LeaveArgumentsAdaptorFrame(masm);
@ -1607,13 +1583,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
__ jmp(edx);
__ jmp(Operand(edx));
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
CpuFeatures::TryForceFeatureScope scope(SSE2);
if (!CpuFeatures::IsSupported(SSE2) && FLAG_debug_code) {
if (!CpuFeatures::IsSupported(SSE2)) {
__ Abort("Unreachable code: Cannot optimize without SSE2 support.");
return;
}
@ -1640,16 +1616,15 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Pass the function to optimize as the argument to the on-stack
// replacement runtime function.
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ push(eax);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
__ LeaveInternalFrame();
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
Label skip;
__ cmp(eax, Immediate(Smi::FromInt(-1)));
__ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
@ -1663,9 +1638,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ TailCallStub(&stub);
if (FLAG_debug_code) {
__ Abort("Unreachable code: returned from tail call.");
}
__ bind(&ok);
__ ret(0);

1074
deps/v8/src/ia32/code-stubs-ia32.cc

File diff suppressed because it is too large

291
deps/v8/src/ia32/code-stubs-ia32.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -60,25 +60,6 @@ class TranscendentalCacheStub: public CodeStub {
};
class StoreBufferOverflowStub: public CodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated() { return true; }
static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
private:
SaveFPRegsMode save_doubles_;
Major MajorKey() { return StoreBufferOverflow; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@ -437,8 +418,6 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0,
Register r1);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
@ -451,7 +430,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
Major MajorKey() { return StringDictionaryLookup; }
Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {
return DictionaryBits::encode(dictionary_.code()) |
@ -472,272 +451,6 @@ class StringDictionaryLookupStub: public CodeStub {
};
class RecordWriteStub: public CodeStub {
public:
RecordWriteStub(Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
}
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
INCREMENTAL_COMPACTION
};
virtual bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
static Mode GetMode(Code* stub) {
byte first_instruction = stub->instruction_start()[0];
byte second_instruction = stub->instruction_start()[2];
if (first_instruction == kTwoByteJumpInstruction) {
return INCREMENTAL;
}
ASSERT(first_instruction == kTwoByteNopInstruction);
if (second_instruction == kFiveByteJumpInstruction) {
return INCREMENTAL_COMPACTION;
}
ASSERT(second_instruction == kFiveByteNopInstruction);
return STORE_BUFFER_ONLY;
}
static void Patch(Code* stub, Mode mode) {
switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
stub->instruction_start()[0] = kTwoByteNopInstruction;
stub->instruction_start()[2] = kFiveByteNopInstruction;
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
stub->instruction_start()[0] = kTwoByteJumpInstruction;
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
stub->instruction_start()[0] = kTwoByteNopInstruction;
stub->instruction_start()[2] = kFiveByteJumpInstruction;
break;
}
ASSERT(GetMode(stub) == mode);
CPU::FlushICache(stub->instruction_start(), 7);
}
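The GetMode/Patch pair above treats the first bytes of the generated stub as a three-state flag: a two-byte nop-equivalent or a short jump, optionally followed by a five-byte nop-equivalent or a near jump. A standalone sketch of the same byte-patching idea over a plain buffer (the assertions and names are illustrative, not V8's API):

#include <cassert>
#include <cstdint>

enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

const uint8_t kTwoByteNop  = 0x3c;  // cmpb al, #imm8 -- acts as a 2-byte nop
const uint8_t kTwoByteJmp  = 0xeb;  // jmp #imm8
const uint8_t kFiveByteNop = 0x3d;  // cmpl eax, #imm32 -- acts as a 5-byte nop
const uint8_t kFiveByteJmp = 0xe9;  // jmp #imm32

Mode GetMode(const uint8_t* code) {
  if (code[0] == kTwoByteJmp) return INCREMENTAL;
  assert(code[0] == kTwoByteNop);
  if (code[2] == kFiveByteJmp) return INCREMENTAL_COMPACTION;
  assert(code[2] == kFiveByteNop);
  return STORE_BUFFER_ONLY;
}

void Patch(uint8_t* code, Mode mode) {
  switch (mode) {
    case STORE_BUFFER_ONLY:       // revert both jumps back to nops
      code[0] = kTwoByteNop;
      code[2] = kFiveByteNop;
      break;
    case INCREMENTAL:             // short jump into the incremental path
      code[0] = kTwoByteJmp;
      break;
    case INCREMENTAL_COMPACTION:  // keep the 2-byte nop, take the near jump
      code[0] = kTwoByteNop;
      code[2] = kFiveByteJmp;
      break;
  }
  assert(GetMode(code) == mode);
  // Real code must also flush the instruction cache after patching.
}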
private:
// This is a helper class for freeing up 3 scratch registers, where the third
// is always ecx (needed for shift operations). The input is two registers
// that must be preserved and one scratch register provided by the caller.
class RegisterAllocation {
public:
RegisterAllocation(Register object,
Register address,
Register scratch0)
: object_orig_(object),
address_orig_(address),
scratch0_orig_(scratch0),
object_(object),
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
if (scratch0.is(ecx)) {
scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
}
if (object.is(ecx)) {
object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
}
if (address.is(ecx)) {
address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
}
ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
}
void Save(MacroAssembler* masm) {
ASSERT(!address_orig_.is(object_));
ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
// We don't have to save scratch0_orig_ because it was given to us as
// a scratch register. But if we had to switch to a different reg then
// we should save the new scratch0_.
if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
if (!ecx.is(scratch0_orig_) &&
!ecx.is(object_orig_) &&
!ecx.is(address_orig_)) {
masm->push(ecx);
}
masm->push(scratch1_);
if (!address_.is(address_orig_)) {
masm->push(address_);
masm->mov(address_, address_orig_);
}
if (!object_.is(object_orig_)) {
masm->push(object_);
masm->mov(object_, object_orig_);
}
}
void Restore(MacroAssembler* masm) {
// These will have been preserved the entire time, so we just need to move
// them back. Only in one case is the orig_ reg different from the plain
// one, since only one of them can alias with ecx.
if (!object_.is(object_orig_)) {
masm->mov(object_orig_, object_);
masm->pop(object_);
}
if (!address_.is(address_orig_)) {
masm->mov(address_orig_, address_);
masm->pop(address_);
}
masm->pop(scratch1_);
if (!ecx.is(scratch0_orig_) &&
!ecx.is(object_orig_) &&
!ecx.is(address_orig_)) {
masm->pop(ecx);
}
if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
}
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved. The caller saved
// registers are eax, ecx and edx. The three scratch registers (incl. ecx)
// will be restored by other means so we don't bother pushing them here.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(SSE2);
masm->sub(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
// Save all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
}
}
}
inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(SSE2);
// Restore all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
}
masm->add(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
}
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
}
inline Register object() { return object_; }
inline Register address() { return address_; }
inline Register scratch0() { return scratch0_; }
inline Register scratch1() { return scratch1_; }
private:
Register object_orig_;
Register address_orig_;
Register scratch0_orig_;
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
// Third scratch register is always ecx.
Register GetRegThatIsNotEcxOr(Register r1,
Register r2,
Register r3) {
for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(ecx)) continue;
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
if (candidate.is(r3)) continue;
return candidate;
}
UNREACHABLE();
return no_reg;
}
friend class RecordWriteStub;
};
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
void Generate(MacroAssembler* masm);
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
Major MajorKey() { return RecordWrite; }
int MinorKey() {
return ObjectBits::encode(object_.code()) |
ValueBits::encode(value_.code()) |
AddressBits::encode(address_.code()) |
RememberedSetActionBits::encode(remembered_set_action_) |
SaveFPRegsModeBits::encode(save_fp_regs_mode_);
}
bool MustBeInStubCache() {
// All stubs must be registered in the stub cache,
// otherwise IncrementalMarker would not be able to find
// and patch it.
return true;
}
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
class ObjectBits: public BitField<int, 0, 3> {};
class ValueBits: public BitField<int, 3, 3> {};
class AddressBits: public BitField<int, 6, 3> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
Register object_;
Register value_;
Register address_;
RememberedSetAction remembered_set_action_;
SaveFPRegsMode save_fp_regs_mode_;
RegisterAllocation regs_;
};
} } // namespace v8::internal
#endif // V8_IA32_CODE_STUBS_IA32_H_

46
deps/v8/src/ia32/codegen-ia32.cc

@ -39,16 +39,12 @@ namespace internal {
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
ASSERT(!masm->has_frame());
masm->set_has_frame(true);
masm->EnterInternalFrame();
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
ASSERT(masm->has_frame());
masm->set_has_frame(false);
masm->LeaveInternalFrame();
}
@ -112,14 +108,14 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ mov(edx, dst);
__ and_(edx, 0xF);
__ neg(edx);
__ add(edx, Immediate(16));
__ add(dst, edx);
__ add(src, edx);
__ sub(count, edx);
__ add(Operand(edx), Immediate(16));
__ add(dst, Operand(edx));
__ add(src, Operand(edx));
__ sub(Operand(count), edx);
// edi is now aligned. Check if esi is also aligned.
Label unaligned_source;
__ test(src, Immediate(0x0F));
__ test(Operand(src), Immediate(0x0F));
__ j(not_zero, &unaligned_source);
{
// Copy loop for aligned source and destination.
@ -134,11 +130,11 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ prefetch(Operand(src, 0x20), 1);
__ movdqa(xmm0, Operand(src, 0x00));
__ movdqa(xmm1, Operand(src, 0x10));
__ add(src, Immediate(0x20));
__ add(Operand(src), Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
__ add(dst, Immediate(0x20));
__ add(Operand(dst), Immediate(0x20));
__ dec(loop_count);
__ j(not_zero, &loop);
@ -146,12 +142,12 @@ OS::MemCopyFunction CreateMemCopyFunction() {
// At most 31 bytes to copy.
Label move_less_16;
__ test(count, Immediate(0x10));
__ test(Operand(count), Immediate(0x10));
__ j(zero, &move_less_16);
__ movdqa(xmm0, Operand(src, 0));
__ add(src, Immediate(0x10));
__ add(Operand(src), Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
__ add(dst, Immediate(0x10));
__ add(Operand(dst), Immediate(0x10));
__ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
@ -180,11 +176,11 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ prefetch(Operand(src, 0x20), 1);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
__ add(src, Immediate(0x20));
__ add(Operand(src), Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
__ add(dst, Immediate(0x20));
__ add(Operand(dst), Immediate(0x20));
__ dec(loop_count);
__ j(not_zero, &loop);
@ -192,12 +188,12 @@ OS::MemCopyFunction CreateMemCopyFunction() {
// At most 31 bytes to copy.
Label move_less_16;
__ test(count, Immediate(0x10));
__ test(Operand(count), Immediate(0x10));
__ j(zero, &move_less_16);
__ movdqu(xmm0, Operand(src, 0));
__ add(src, Immediate(0x10));
__ add(Operand(src), Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
__ add(dst, Immediate(0x10));
__ add(Operand(dst), Immediate(0x10));
__ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
@ -232,10 +228,10 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ mov(edx, dst);
__ and_(edx, 0x03);
__ neg(edx);
__ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
__ add(dst, edx);
__ add(src, edx);
__ sub(count, edx);
__ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3)
__ add(dst, Operand(edx));
__ add(src, Operand(edx));
__ sub(Operand(count), edx);
// edi is now aligned, ecx holds number of remaining bytes to copy.
__ mov(edx, count);

13
deps/v8/src/ia32/debug-ia32.cc

@ -100,8 +100,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList non_object_regs,
bool convert_call_to_jmp) {
// Enter an internal frame.
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
@ -134,8 +133,8 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
CEntryStub ceb(1);
__ CallStub(&ceb);
// Restore the register values containing object pointers from the
// expression stack.
// Restore the register values containing object pointers from the expression
// stack.
for (int i = kNumJSCallerSaved; --i >= 0;) {
int r = JSCallerSavedCode(i);
Register reg = { r };
@ -152,12 +151,12 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
}
// Get rid of the internal frame.
}
__ LeaveInternalFrame();
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
__ add(esp, Immediate(kPointerSize));
__ add(Operand(esp), Immediate(kPointerSize));
}
// Now that the break point has been handled, resume normal execution by
@ -299,7 +298,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
// Re-run JSFunction, edi is function, esi is context.
__ jmp(edx);
__ jmp(Operand(edx));
}
const bool Debug::kFrameDropperSupported = true;

94
deps/v8/src/ia32/deoptimizer-ia32.cc

@ -116,7 +116,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
new_reloc->GetDataStartAddress() + padding, 0);
intptr_t comment_string
= reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string);
for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
byte* pos_before = reloc_info_writer.pos();
@ -174,8 +174,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// We use RUNTIME_ENTRY for deoptimization bailouts.
RelocInfo rinfo(curr_address + 1, // 1 after the call opcode.
RelocInfo::RUNTIME_ENTRY,
reinterpret_cast<intptr_t>(deopt_entry),
NULL);
reinterpret_cast<intptr_t>(deopt_entry));
reloc_info_writer.Write(&rinfo);
ASSERT_GE(reloc_info_writer.pos(),
reloc_info->address() + ByteArray::kHeaderSize);
@ -206,11 +205,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
// We might be in the middle of incremental marking with compaction.
// Tell collector to treat this code object in a special way and
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@ -227,8 +221,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
@ -257,13 +250,6 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
*(call_target_address - 2) = 0x90; // nop
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
RelocInfo rinfo(call_target_address,
RelocInfo::CODE_TARGET,
0,
unoptimized_code);
unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
unoptimized_code, &rinfo, replacement_code);
}
@ -282,9 +268,6 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
*(call_target_address - 2) = 0x07; // offset
Assembler::set_target_address_at(call_target_address,
check_code->entry());
check_code->GetHeap()->incremental_marking()->
RecordCodeTargetPatch(call_target_address, check_code);
}
@ -432,14 +415,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Setup the frame pointer and the context pointer.
// All OSR stack frames are dynamically aligned to an 8-byte boundary.
int frame_pointer = input_->GetRegister(ebp.code());
if ((frame_pointer & 0x4) == 0) {
// Return address at FP + 4 should be aligned, so FP mod 8 should be 4.
frame_pointer -= kPointerSize;
has_alignment_padding_ = 1;
}
output_[0]->SetRegister(ebp.code(), frame_pointer);
output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
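The padding logic removed in the hunks above follows one arithmetic rule: on ia32 the return address sits at fp + 4, so for it to be 8-byte aligned, fp mod 8 must equal 4. A tiny self-contained sketch of that adjustment (illustrative only, not V8's Deoptimizer code):

constexpr int kPointerSize = 4;  // ia32

struct OsrFp {
  int fp;
  int has_alignment_padding;
};

// If fp mod 8 == 0, the return address at fp + 4 would be misaligned, so the
// removed code shifted the frame down one slot and remembered the padding.
OsrFp AlignOsrFramePointer(int frame_pointer) {
  OsrFp result{frame_pointer, 0};
  if ((frame_pointer & 0x4) == 0) {
    result.fp -= kPointerSize;
    result.has_alignment_padding = 1;
  }
  return result;
}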
@ -504,11 +480,9 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
// If the optimized frame had alignment padding, adjust the frame pointer
// to point to the new position of the old frame pointer after padding
// is removed. Subtract 2 * kPointerSize for the context and function slots.
top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
height_in_bytes + has_alignment_padding_ * kPointerSize;
// 2 = context and function in the frame.
top_address =
input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
@ -559,9 +533,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
ASSERT(!is_bottommost ||
input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize
== fp_value);
ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
if (FLAG_trace_deopt) {
@ -666,7 +638,7 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::kNumAllocatableRegisters;
__ sub(esp, Immediate(kDoubleRegsSize));
__ sub(Operand(esp), Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
@ -690,7 +662,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
}
__ sub(edx, ebp);
__ sub(edx, Operand(ebp));
__ neg(edx);
// Allocate a new deoptimizer object.
@ -703,10 +675,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
__ mov(Operand(esp, 5 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
}
// Preserve deoptimizer object in register eax and get the input
// frame descriptor pointer.
@ -729,15 +698,15 @@ void Deoptimizer::EntryGenerator::Generate() {
// Remove the bailout id and the double registers from the stack.
if (type() == EAGER) {
__ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
__ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
} else {
__ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
__ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
}
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
__ add(ecx, esp);
__ add(ecx, Operand(esp));
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
@ -746,43 +715,18 @@ void Deoptimizer::EntryGenerator::Generate() {
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(edx, 0));
__ add(edx, Immediate(sizeof(uint32_t)));
__ cmp(ecx, esp);
__ add(Operand(edx), Immediate(sizeof(uint32_t)));
__ cmp(ecx, Operand(esp));
__ j(not_equal, &pop_loop);
// If frame was dynamically aligned, pop padding.
Label sentinel, sentinel_done;
__ pop(ecx);
__ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
__ j(equal, &sentinel);
__ push(ecx);
__ jmp(&sentinel_done);
__ bind(&sentinel);
__ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
Immediate(1));
__ bind(&sentinel_done);
// Compute the output frame in the deoptimizer.
__ push(eax);
__ PrepareCallCFunction(1, ebx);
__ mov(Operand(esp, 0 * kPointerSize), eax);
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate), 1);
}
__ pop(eax);
if (type() == OSR) {
// If alignment padding is added, push the sentinel.
Label no_osr_padding;
__ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
Immediate(0));
__ j(equal, &no_osr_padding, Label::kNear);
__ push(Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
__ bind(&no_osr_padding);
}
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the
@ -795,12 +739,12 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(ebx, Operand(eax, 0));
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
__ bind(&inner_push_loop);
__ sub(ecx, Immediate(sizeof(uint32_t)));
__ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
__ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
__ test(ecx, ecx);
__ test(ecx, Operand(ecx));
__ j(not_zero, &inner_push_loop);
__ add(eax, Immediate(kPointerSize));
__ cmp(eax, edx);
__ add(Operand(eax), Immediate(kPointerSize));
__ cmp(eax, Operand(edx));
__ j(below, &outer_push_loop);
// In case of OSR, we have to restore the XMM registers.

29
deps/v8/src/ia32/disasm-ia32.cc

@ -55,7 +55,6 @@ struct ByteMnemonic {
static const ByteMnemonic two_operands_instr[] = {
{0x01, "add", OPER_REG_OP_ORDER},
{0x03, "add", REG_OPER_OP_ORDER},
{0x09, "or", OPER_REG_OP_ORDER},
{0x0B, "or", REG_OPER_OP_ORDER},
@ -118,19 +117,6 @@ static const ByteMnemonic short_immediate_instr[] = {
};
// Generally we don't want to generate these because they are subject to partial
// register stalls. They are included for completeness and because the cmp
// variant is used by the RecordWrite stub. Because it does not update the
// register, it is not subject to partial register stalls.
static ByteMnemonic byte_immediate_instr[] = {
{0x0c, "or", UNSET_OP_ORDER},
{0x24, "and", UNSET_OP_ORDER},
{0x34, "xor", UNSET_OP_ORDER},
{0x3c, "cmp", UNSET_OP_ORDER},
{-1, "", UNSET_OP_ORDER}
};
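The byte_immediate_instr table above covers the two-byte "op al, imm8" encodings: one opcode byte followed by one immediate byte. A small standalone decoder sketch with assumed helper names (not the disassembler's API), mirroring the BYTE_IMMEDIATE_INSTR case later in this file:

#include <cstddef>
#include <cstdint>
#include <cstdio>

const char* ByteImmediateMnemonic(uint8_t opcode) {
  switch (opcode) {
    case 0x0c: return "or";
    case 0x24: return "and";
    case 0x34: return "xor";
    case 0x3c: return "cmp";   // the variant used by the RecordWrite stub
    default:   return nullptr;
  }
}

// Returns the number of bytes consumed, or 0 if this is not one of the
// byte-immediate instructions listed above.
size_t DecodeByteImmediate(const uint8_t* data, char* out, size_t out_size) {
  const char* mnem = ByteImmediateMnemonic(data[0]);
  if (mnem == nullptr) return 0;
  snprintf(out, out_size, "%s al, 0x%x", mnem, data[1]);
  return 2;
}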
static const char* const jump_conditional_mnem[] = {
/*0*/ "jo", "jno", "jc", "jnc",
/*4*/ "jz", "jnz", "jna", "ja",
@ -163,8 +149,7 @@ enum InstructionType {
REGISTER_INSTR,
MOVE_REG_INSTR,
CALL_JUMP_INSTR,
SHORT_IMMEDIATE_INSTR,
BYTE_IMMEDIATE_INSTR
SHORT_IMMEDIATE_INSTR
};
@ -213,7 +198,6 @@ void InstructionTable::Init() {
CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
CopyTable(call_jump_instr, CALL_JUMP_INSTR);
CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR);
AddJumpConditionalShort();
SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
@ -928,12 +912,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
}
case BYTE_IMMEDIATE_INSTR: {
AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
data += 2;
break;
}
case NO_INSTR:
processed = false;
break;
@ -1368,6 +1346,11 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 2;
break;
case 0x2C:
AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1));
data += 2;
break;
case 0xA9:
AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
data += 5;

352
deps/v8/src/ia32/full-codegen-ia32.cc

@ -138,7 +138,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// function calls.
if (info->is_strict_mode() || info->is_native()) {
Label ok;
__ test(ecx, ecx);
__ test(ecx, Operand(ecx));
__ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
@ -147,11 +147,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok);
}
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
@ -205,12 +200,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ mov(Operand(esi, context_offset), eax);
// Update the write barrier. This clobbers eax and ebx.
__ RecordWriteContextSlot(esi,
context_offset,
eax,
ebx,
kDontSaveFPRegs);
// Update the write barrier. This clobbers all involved
// registers, so we have to use a third register to avoid
// clobbering esi.
__ mov(ecx, esi);
__ RecordWrite(ecx, context_offset, eax, ebx);
}
}
}
@ -266,7 +260,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
int ignored = 0;
EmitDeclaration(scope()->function(), CONST, NULL, &ignored);
EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
}
VisitDeclarations(scope()->declarations());
}
@ -371,10 +365,10 @@ void FullCodeGenerator::EmitReturnSequence() {
void FullCodeGenerator::verify_stack_height() {
ASSERT(FLAG_verify_stack_height);
__ sub(ebp, Immediate(kPointerSize * stack_height()));
__ cmp(ebp, esp);
__ sub(Operand(ebp), Immediate(kPointerSize * stack_height()));
__ cmp(ebp, Operand(esp));
__ Assert(equal, "Full codegen stack height not as expected.");
__ add(ebp, Immediate(kPointerSize * stack_height()));
__ add(Operand(ebp), Immediate(kPointerSize * stack_height()));
}
@ -603,7 +597,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
ToBooleanStub stub(result_register());
__ push(result_register());
__ CallStub(&stub, condition->test_id());
__ test(result_register(), result_register());
__ test(result_register(), Operand(result_register()));
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
@ -667,12 +661,11 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
__ mov(location, src);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
int offset = Context::SlotOffset(var->index());
ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
__ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
__ RecordWrite(scratch0, offset, src, scratch1);
}
}
@ -704,7 +697,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
VariableMode mode,
Variable::Mode mode,
FunctionLiteral* function,
int* global_count) {
// If it was not possible to allocate the variable at compile time, we
@ -722,7 +715,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ mov(StackOperand(variable), result_register());
} else if (mode == CONST || mode == LET) {
} else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration");
__ mov(StackOperand(variable),
Immediate(isolate()->factory()->the_hole_value()));
@ -745,16 +738,11 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function);
__ mov(ContextOperand(esi, variable->index()), result_register());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(esi,
Context::SlotOffset(variable->index()),
result_register(),
ecx,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
int offset = Context::SlotOffset(variable->index());
__ mov(ebx, esi);
__ RecordWrite(ebx, offset, result_register(), ecx);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
} else if (mode == CONST || mode == LET) {
} else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration");
__ mov(ContextOperand(esi, variable->index()),
Immediate(isolate()->factory()->the_hole_value()));
@ -768,8 +756,10 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ push(esi);
__ push(Immediate(variable->name()));
// Declaration nodes are always introduced in one of three modes.
ASSERT(mode == VAR || mode == CONST || mode == LET);
PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE;
ASSERT(mode == Variable::VAR ||
mode == Variable::CONST ||
mode == Variable::LET);
PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
__ push(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
@ -778,7 +768,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
increment_stack_height(3);
if (function != NULL) {
VisitForStackValue(function);
} else if (mode == CONST || mode == LET) {
} else if (mode == Variable::CONST || mode == Variable::LET) {
__ push(Immediate(isolate()->factory()->the_hole_value()));
increment_stack_height();
} else {
@ -845,10 +835,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
if (inline_smi_code) {
Label slow_case;
__ mov(ecx, edx);
__ or_(ecx, eax);
__ or_(ecx, Operand(eax));
patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
__ cmp(edx, eax);
__ cmp(edx, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@ -860,7 +850,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
__ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ test(eax, eax);
__ test(eax, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@ -949,7 +939,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// For all objects but the receiver, check that the cache is empty.
Label check_prototype;
__ cmp(ecx, eax);
__ cmp(ecx, Operand(eax));
__ j(equal, &check_prototype, Label::kNear);
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
__ cmp(edx, isolate()->factory()->empty_fixed_array());
@ -1031,9 +1021,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(ecx); // Enumerable.
__ push(ebx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ test(eax, eax);
__ test(eax, Operand(eax));
__ j(equal, loop_statement.continue_label());
__ mov(ebx, eax);
__ mov(ebx, Operand(eax));
// Update the 'each' property or variable from the possibly filtered
// entry in register ebx.
@ -1057,7 +1047,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
__ add(esp, Immediate(5 * kPointerSize));
__ add(Operand(esp), Immediate(5 * kPointerSize));
decrement_stack_height(ForIn::kElementCount);
// Exit and decrement the loop depth.
@ -1199,22 +1189,16 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
if (var->mode() == DYNAMIC_GLOBAL) {
if (var->mode() == Variable::DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
__ jmp(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
} else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
if (local->mode() == CONST ||
local->mode() == LET) {
if (local->mode() == Variable::CONST) {
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
if (local->mode() == CONST) {
__ mov(eax, isolate()->factory()->undefined_value());
} else { // LET
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kThrowReferenceError, 1);
}
}
__ jmp(done);
}
@ -1247,7 +1231,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
if (var->mode() != LET && var->mode() != CONST) {
if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
context()->Plug(var);
} else {
// Let and const need a read barrier.
@ -1255,10 +1239,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(eax, var);
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, &done, Label::kNear);
if (var->mode() == LET) {
if (var->mode() == Variable::LET) {
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kThrowReferenceError, 1);
} else { // CONST
} else { // Variable::CONST
__ mov(eax, isolate()->factory()->undefined_value());
}
__ bind(&done);
@ -1496,18 +1480,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ mov(FieldOperand(ebx, offset), result_register());
Label no_map_change;
__ JumpIfSmi(result_register(), &no_map_change);
// Update the write barrier for the array store.
__ RecordWriteField(ebx, offset, result_register(), ecx,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
__ CheckFastSmiOnlyElements(edi, &no_map_change, Label::kNear);
__ push(Operand(esp, 0));
__ CallRuntime(Runtime::kNonSmiElementStored, 1);
__ bind(&no_map_change);
__ RecordWrite(ebx, offset, result_register(), ecx);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
@ -1667,7 +1641,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ pop(edx);
decrement_stack_height();
__ mov(ecx, eax);
__ or_(eax, edx);
__ or_(eax, Operand(edx));
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
@ -1717,32 +1691,32 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break;
}
case Token::ADD:
__ add(eax, ecx);
__ add(eax, Operand(ecx));
__ j(overflow, &stub_call);
break;
case Token::SUB:
__ sub(eax, ecx);
__ sub(eax, Operand(ecx));
__ j(overflow, &stub_call);
break;
case Token::MUL: {
__ SmiUntag(eax);
__ imul(eax, ecx);
__ imul(eax, Operand(ecx));
__ j(overflow, &stub_call);
__ test(eax, eax);
__ test(eax, Operand(eax));
__ j(not_zero, &done, Label::kNear);
__ mov(ebx, edx);
__ or_(ebx, ecx);
__ or_(ebx, Operand(ecx));
__ j(negative, &stub_call);
break;
}
case Token::BIT_OR:
__ or_(eax, ecx);
__ or_(eax, Operand(ecx));
break;
case Token::BIT_AND:
__ and_(eax, ecx);
__ and_(eax, Operand(ecx));
break;
case Token::BIT_XOR:
__ xor_(eax, ecx);
__ xor_(eax, Operand(ecx));
break;
default:
UNREACHABLE();
@ -1864,7 +1838,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
} else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
__ push(eax); // Value.
@ -1885,12 +1859,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(location, eax);
if (var->IsContextSlot()) {
__ mov(edx, eax);
int offset = Context::SlotOffset(var->index());
__ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
__ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
}
}
} else if (var->mode() != CONST) {
} else if (var->mode() != Variable::CONST) {
// Assignment to var or initializing assignment to let.
if (var->IsStackAllocated() || var->IsContextSlot()) {
MemOperand location = VarOperand(var, ecx);
@ -1904,8 +1877,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(location, eax);
if (var->IsContextSlot()) {
__ mov(edx, eax);
int offset = Context::SlotOffset(var->index());
__ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
__ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
}
} else {
ASSERT(var->IsLookupSlot());
@ -2097,29 +2069,8 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
}
// Record source position for debugger.
SetSourcePosition(expr->position());
// Record call targets in unoptimized code, but not in the snapshot.
bool record_call_target = !Serializer::enabled();
if (record_call_target) {
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
}
CallFunctionStub stub(arg_count, flags);
__ CallStub(&stub);
if (record_call_target) {
// There is a one element cache in the instruction stream.
#ifdef DEBUG
int return_site_offset = masm()->pc_offset();
#endif
Handle<Object> uninitialized =
CallFunctionStub::UninitializedSentinel(isolate());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
__ test(eax, Immediate(cell));
// Patching code in the stub assumes the opcode is 1 byte and there is
// word for a pointer in the operand.
ASSERT(masm()->pc_offset() - return_site_offset >= 1 + kPointerSize);
}
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@ -2143,8 +2094,10 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
// Push the strict mode flag. In harmony mode every eval call
// is a strict mode eval call.
StrictModeFlag strict_mode =
FLAG_harmony_scoping ? kStrictMode : strict_mode_flag();
StrictModeFlag strict_mode = strict_mode_flag();
if (FLAG_harmony_block_scoping) {
strict_mode = kStrictMode;
}
__ push(Immediate(Smi::FromInt(strict_mode)));
__ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
@ -2187,7 +2140,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// context lookup in the runtime system.
Label done;
Variable* var = proxy->var();
if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) {
if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
Label slow;
EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
// Push the function and resolve eval.
@ -2485,7 +2438,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
STATIC_ASSERT(kPointerSize == 4);
__ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
// Calculate location of the first key name.
__ add(ebx,
__ add(Operand(ebx),
Immediate(FixedArray::kHeaderSize +
DescriptorArray::kFirstIndex * kPointerSize));
// Loop through all the keys in the descriptor array. If one of these is the
@ -2496,9 +2449,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ mov(edx, FieldOperand(ebx, 0));
__ cmp(edx, FACTORY->value_of_symbol());
__ j(equal, if_false);
__ add(ebx, Immediate(kPointerSize));
__ add(Operand(ebx), Immediate(kPointerSize));
__ bind(&entry);
__ cmp(ebx, ecx);
__ cmp(ebx, Operand(ecx));
__ j(not_equal, &loop);
// Reload map as register ebx was used as temporary above.
@ -2638,7 +2591,7 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
__ pop(ebx);
decrement_stack_height();
__ cmp(eax, ebx);
__ cmp(eax, Operand(ebx));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@ -2694,24 +2647,20 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
// Assume that there are only two callable types, and one of them is at
// either end of the type range for JS object types. Saves extra comparisons.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
// Map is now in eax.
__ j(below, &null);
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
FIRST_SPEC_OBJECT_TYPE + 1);
__ j(equal, &function);
__ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
LAST_SPEC_OBJECT_TYPE - 1);
__ j(equal, &function);
// Assume that there is no larger type.
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
// As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
// FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
// LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
__ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
__ j(above_equal, &function);
// Check if the constructor in the map is a function.
__ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, &non_function_constructor);
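The comparisons above lean on the layout assumption spelled out in the comments: exactly two callable instance types, sitting at the two ends of the spec-object range. With made-up numeric values (purely illustrative, not V8's actual instance-type list), the check reduces to the two boundary comparisons the code already performs:

// Hypothetical instance-type numbering for illustration only.
enum InstanceType {
  FIRST_SPEC_OBJECT_TYPE = 100,  // callable type at the low end
  ORDINARY_OBJECT_TYPE_A = 101,
  ORDINARY_OBJECT_TYPE_B = 102,
  LAST_SPEC_OBJECT_TYPE  = 103   // callable type at the high end
};

// Because the two callable types bound the range, the equality checks fall
// out of the range comparisons needed anyway, saving extra comparisons.
bool IsCallableSpecObject(InstanceType type) {
  return type == FIRST_SPEC_OBJECT_TYPE || type == LAST_SPEC_OBJECT_TYPE;
}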
@ -2792,8 +2741,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(xmm1, ebx);
__ movd(xmm0, eax);
__ movd(xmm1, Operand(ebx));
__ movd(xmm0, Operand(eax));
__ cvtss2sd(xmm1, xmm1);
__ xorps(xmm0, xmm1);
__ subsd(xmm0, xmm1);
@ -2894,11 +2843,10 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
// Store the value.
__ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
__ mov(edx, eax);
__ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
__ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx);
__ bind(&done);
context()->Plug(eax);
@ -3171,14 +3119,14 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ mov(index_1, Operand(esp, 1 * kPointerSize));
__ mov(index_2, Operand(esp, 0));
__ mov(temp, index_1);
__ or_(temp, index_2);
__ or_(temp, Operand(index_2));
__ JumpIfNotSmi(temp, &slow_case);
// Check that both indices are valid.
__ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
__ cmp(temp, index_1);
__ cmp(temp, Operand(index_1));
__ j(below_equal, &slow_case);
__ cmp(temp, index_2);
__ cmp(temp, Operand(index_2));
__ j(below_equal, &slow_case);
// Bring addresses into index1 and index2.
@ -3191,35 +3139,16 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ mov(Operand(index_2, 0), object);
__ mov(Operand(index_1, 0), temp);
Label no_remembered_set;
__ CheckPageFlag(elements,
temp,
1 << MemoryChunk::SCAN_ON_SCAVENGE,
not_zero,
&no_remembered_set,
Label::kNear);
// Possible optimization: do a check that both values are Smis
// (or them and test against Smi mask.)
// We are swapping two objects in an array and the incremental marker never
// pauses in the middle of scanning a single object. Therefore the
// incremental marker is not disturbed, so we don't need to call the
// RecordWrite stub that notifies the incremental marker.
__ RememberedSetHelper(elements,
index_1,
temp,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ RememberedSetHelper(elements,
index_2,
temp,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ bind(&no_remembered_set);
Label new_space;
__ InNewSpace(elements, temp, equal, &new_space);
__ mov(object, elements);
__ RecordWriteHelper(object, index_1, temp);
__ RecordWriteHelper(elements, index_2, temp);
__ bind(&new_space);
// We are done. Drop elements from the stack, and return undefined.
__ add(esp, Immediate(3 * kPointerSize));
__ add(Operand(esp), Immediate(3 * kPointerSize));
__ mov(eax, isolate()->factory()->undefined_value());
__ jmp(&done);
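Both the removed RememberedSetHelper calls and the restored RecordWriteHelper path in the hunk above exist for the same reason: a store that makes an old-space object point at a new-space object must be recorded so the scavenger can find the slot later. A rough, self-contained sketch of that rule with a hypothetical heap layout (addresses below a fixed limit count as new space; this is not V8's heap):

#include <cstdint>
#include <unordered_set>

constexpr uintptr_t kNewSpaceLimit = 0x10000000;  // assumed boundary

struct MiniHeap {
  std::unordered_set<void**> remembered_set;

  static bool InNewSpace(const void* p) {
    return reinterpret_cast<uintptr_t>(p) < kNewSpaceLimit;
  }

  // Store value into *slot and remember the slot if it now holds an
  // old-to-new pointer; slots that live in new space never need recording.
  void RecordWrite(void** slot, void* value) {
    *slot = value;
    if (!InNewSpace(slot) && InNewSpace(value)) {
      remembered_set.insert(slot);
    }
  }
};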
@ -3292,11 +3221,11 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
__ pop(left);
Label done, fail, ok;
__ cmp(left, right);
__ cmp(left, Operand(right));
__ j(equal, &ok);
// Fail if either is a non-HeapObject.
__ mov(tmp, left);
__ and_(tmp, right);
__ and_(Operand(tmp), right);
__ JumpIfSmi(tmp, &fail);
__ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
__ CmpInstanceType(tmp, JS_REGEXP_TYPE);
@ -3387,7 +3316,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
Operand separator_operand = Operand(esp, 2 * kPointerSize);
Operand result_operand = Operand(esp, 1 * kPointerSize);
Operand array_length_operand = Operand(esp, 0);
__ sub(esp, Immediate(2 * kPointerSize));
__ sub(Operand(esp), Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
__ JumpIfSmi(array, &bailout);
@ -3423,7 +3352,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
// Live loop registers: index, array_length, string,
// scratch, string_length, elements.
if (FLAG_debug_code) {
__ cmp(index, array_length);
__ cmp(index, Operand(array_length));
__ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
}
__ bind(&loop);
@ -3441,8 +3370,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ add(string_length,
FieldOperand(string, SeqAsciiString::kLengthOffset));
__ j(overflow, &bailout);
__ add(index, Immediate(1));
__ cmp(index, array_length);
__ add(Operand(index), Immediate(1));
__ cmp(index, Operand(array_length));
__ j(less, &loop);
// If array_length is 1, return elements[0], a string.
@ -3476,10 +3405,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
// to string_length.
__ mov(scratch, separator_operand);
__ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
__ sub(string_length, scratch); // May be negative, temporarily.
__ sub(string_length, Operand(scratch)); // May be negative, temporarily.
__ imul(scratch, array_length_operand);
__ j(overflow, &bailout);
__ add(string_length, scratch);
__ add(string_length, Operand(scratch));
__ j(overflow, &bailout);
__ shr(string_length, 1);
@ -3520,7 +3449,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
__ add(Operand(index), Immediate(1));
__ bind(&loop_1_condition);
__ cmp(index, array_length_operand);
__ j(less, &loop_1); // End while (index < length).
@ -3561,7 +3490,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
__ add(Operand(index), Immediate(1));
__ cmp(index, array_length_operand);
__ j(less, &loop_2); // End while (index < length).
@ -3602,7 +3531,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
__ add(Operand(index), Immediate(1));
__ cmp(index, array_length_operand);
__ j(less, &loop_3); // End while (index < length).
@ -3614,7 +3543,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ bind(&done);
__ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
__ add(esp, Immediate(3 * kPointerSize));
__ add(Operand(esp), Immediate(3 * kPointerSize));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
decrement_stack_height();
@ -3894,9 +3823,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
__ add(eax, Immediate(Smi::FromInt(1)));
__ add(Operand(eax), Immediate(Smi::FromInt(1)));
} else {
__ sub(eax, Immediate(Smi::FromInt(1)));
__ sub(Operand(eax), Immediate(Smi::FromInt(1)));
}
__ j(overflow, &stub_call, Label::kNear);
// We could eliminate this smi check if we split the code at
@ -3906,9 +3835,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
// Call stub. Undo operation first.
if (expr->op() == Token::INC) {
__ sub(eax, Immediate(Smi::FromInt(1)));
__ sub(Operand(eax), Immediate(Smi::FromInt(1)));
} else {
__ add(eax, Immediate(Smi::FromInt(1)));
__ add(Operand(eax), Immediate(Smi::FromInt(1)));
}
}
@ -4027,14 +3956,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Handle<String> check) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
Handle<String> check,
Label* if_true,
Label* if_false,
Label* fall_through) {
{ AccumulatorValueContext context(this);
VisitForTypeofValue(expr);
}
@ -4073,11 +3998,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(not_zero, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(eax, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
__ j(equal, if_true);
__ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
Split(equal, if_true, if_false, fall_through);
__ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx);
Split(above_equal, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(eax, if_false);
if (!FLAG_harmony_typeof) {
@ -4095,7 +4017,18 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else {
if (if_false != fall_through) __ jmp(if_false);
}
context()->Plug(if_true, if_false);
}
void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
Label* if_true,
Label* if_false,
Label* fall_through) {
VisitForAccumulatorValue(expr);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(eax, isolate()->factory()->undefined_value());
Split(equal, if_true, if_false, fall_through);
}
@ -4103,12 +4036,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position());
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
if (TryLiteralCompare(expr)) return;
// Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed.
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@ -4116,9 +4046,16 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
switch (expr->op()) {
case Token::IN:
VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
@ -4134,7 +4071,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ CallStub(&stub);
decrement_stack_height(2);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, eax);
__ test(eax, Operand(eax));
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
break;
@ -4180,10 +4117,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ mov(ecx, edx);
__ or_(ecx, eax);
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
__ cmp(edx, eax);
__ cmp(edx, Operand(eax));
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
@ -4195,7 +4132,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, eax);
__ test(eax, Operand(eax));
Split(cc, if_true, if_false, fall_through);
}
}
@ -4206,9 +4143,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil) {
void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
@ -4216,20 +4151,15 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
VisitForAccumulatorValue(expr->expression());
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Handle<Object> nil_value = nil == kNullValue ?
isolate()->factory()->null_value() :
isolate()->factory()->undefined_value();
__ cmp(eax, nil_value);
if (expr->op() == Token::EQ_STRICT) {
__ cmp(eax, isolate()->factory()->null_value());
if (expr->is_strict()) {
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Object> other_nil_value = nil == kNullValue ?
isolate()->factory()->undefined_value() :
isolate()->factory()->null_value();
__ j(equal, if_true);
__ cmp(eax, other_nil_value);
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, if_true);
__ JumpIfSmi(eax, if_false);
// It can be an undetectable object.
@ -4296,7 +4226,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx));
__ pop(edx);
__ sub(edx, Immediate(masm_->CodeObject()));
__ sub(Operand(edx), Immediate(masm_->CodeObject()));
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ SmiTag(edx);
@ -4312,8 +4242,8 @@ void FullCodeGenerator::ExitFinallyBlock() {
// Uncook return address.
__ pop(edx);
__ SmiUntag(edx);
__ add(edx, Immediate(masm_->CodeObject()));
__ jmp(edx);
__ add(Operand(edx), Immediate(masm_->CodeObject()));
__ jmp(Operand(edx));
}

130
deps/v8/src/ia32/ic-ia32.cc

@ -212,7 +212,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update write barrier. Make sure not to clobber the value.
__ mov(r1, value);
__ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
__ RecordWrite(elements, r0, r1);
}
@ -326,7 +326,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Fast case: Do the load.
STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
__ cmp(scratch, Immediate(FACTORY->the_hole_value()));
__ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
@ -394,8 +394,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Check if element is in the range of mapped arguments. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
__ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
__ sub(scratch2, Immediate(Smi::FromInt(2)));
__ cmp(key, scratch2);
__ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
__ cmp(key, Operand(scratch2));
__ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole.
@ -432,7 +432,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
__ cmp(key, scratch);
__ cmp(key, Operand(scratch));
__ j(greater_equal, slow_case);
return FieldOperand(backing_store,
key,
@ -534,7 +534,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shr(ecx, KeyedLookupCache::kMapHashShift);
__ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
__ shr(edi, String::kHashShift);
__ xor_(ecx, edi);
__ xor_(ecx, Operand(edi));
__ and_(ecx, KeyedLookupCache::kCapacityMask);
// Load the key (consisting of map and symbol) from the cache and
@ -545,7 +545,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shl(edi, kPointerSizeLog2 + 1);
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
__ add(edi, Immediate(kPointerSize));
__ add(Operand(edi), Immediate(kPointerSize));
__ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
@ -559,12 +559,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi,
Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
__ sub(edi, ecx);
__ sub(edi, Operand(ecx));
__ j(above_equal, &property_array_property);
// Load in-object property.
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ add(ecx, edi);
__ add(ecx, Operand(edi));
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
@ -651,8 +651,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
__ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
__ and_(ecx, Immediate(kSlowCaseBitFieldMask));
__ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor));
__ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
__ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
__ j(not_zero, &slow);
// Everything is fine, call runtime.
@ -710,7 +710,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(mapped_location, eax);
__ lea(ecx, mapped_location);
__ mov(edx, eax);
__ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
__ RecordWrite(ebx, ecx, edx);
__ Ret();
__ bind(&notin);
// The unmapped lookup expects that the parameter map is in ebx.
@ -719,7 +719,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(unmapped_location, eax);
__ lea(edi, unmapped_location);
__ mov(edx, eax);
__ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
__ RecordWrite(ebx, edi, edx);
__ Ret();
__ bind(&slow);
GenerateMiss(masm, false);
@ -734,9 +734,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, fast_object_with_map_check, fast_object_without_map_check;
Label fast_double_with_map_check, fast_double_without_map_check;
Label check_if_double_array, array, extra;
Label slow, fast, array, extra;
// Check that the object isn't a smi.
__ JumpIfSmi(edx, &slow);
@ -752,18 +750,22 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ CmpInstanceType(edi, JS_ARRAY_TYPE);
__ j(equal, &array);
// Check that the object is some kind of JSObject.
__ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
__ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE);
__ j(below, &slow);
__ CmpInstanceType(edi, JS_PROXY_TYPE);
__ j(equal, &slow);
__ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE);
__ j(equal, &slow);
// Object case: Check key against length in the elements array.
// eax: value
// edx: JSObject
// ecx: key (a smi)
// edi: receiver map
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
// Check array bounds. Both the key and the length of FixedArray are smis.
__ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
__ j(below, &fast_object_with_map_check);
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode and writable.
__ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(below, &fast);
// Slow case: call runtime.
__ bind(&slow);
@ -776,28 +778,16 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// eax: value
// edx: receiver, a JSArray
// ecx: key, a smi.
// ebx: receiver->elements, a FixedArray
// edi: receiver map
// edi: receiver->elements, a FixedArray
// flags: compare (ecx, edx.length())
// do not leave holes in the array:
__ j(not_equal, &slow);
__ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
__ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
__ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
__ cmp(edi, masm->isolate()->factory()->fixed_array_map());
__ j(not_equal, &check_if_double_array);
// Add 1 to receiver->length, and go to common element store code for Objects.
// Add 1 to receiver->length, and go to fast array write.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ jmp(&fast_object_without_map_check);
__ bind(&check_if_double_array);
__ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
__ j(not_equal, &slow);
// Add 1 to receiver->length, and go to common element store code for doubles.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ jmp(&fast_double_without_map_check);
__ jmp(&fast);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
@ -806,54 +796,24 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// eax: value
// edx: receiver, a JSArray
// ecx: key, a smi.
// edi: receiver map
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
// Check the key against the length in the array and fall through to the
// common store code.
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
__ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis.
__ j(above_equal, &extra);
// Fast case: Do the store, could either Object or double.
__ bind(&fast_object_with_map_check);
// Fast case: Do the store.
__ bind(&fast);
// eax: value
// ecx: key (a smi)
// edx: receiver
// ebx: FixedArray receiver->elements
// edi: receiver map
__ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
__ cmp(edi, masm->isolate()->factory()->fixed_array_map());
__ j(not_equal, &fast_double_with_map_check);
__ bind(&fast_object_without_map_check);
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(eax, &non_smi_value);
// It's irrelevant whether array is smi-only or not when writing a smi.
__ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
__ ret(0);
__ bind(&non_smi_value);
// Escape to slow case when writing non-smi into smi-only array.
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ CheckFastObjectElements(edi, &slow, Label::kNear);
// Fast elements array, store the value to the elements backing store.
__ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
// edi: FixedArray receiver->elements
__ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax);
// Update write barrier for the elements array address.
__ mov(edx, eax); // Preserve the value which is returned.
__ RecordWriteArray(
ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ret(0);
__ bind(&fast_double_with_map_check);
// Check for fast double array case. If this fails, call through to the
// runtime.
__ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
__ j(not_equal, &slow);
__ bind(&fast_double_without_map_check);
// If the value is a number, store it as a double in the FastDoubleElements
// array.
__ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0, &slow, false);
__ mov(edx, Operand(eax));
__ RecordWrite(edi, 0, edx, ecx);
__ ret(0);
}
@ -991,8 +951,8 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Enter an internal frame.
__ EnterInternalFrame();
// Push the receiver and the name of the function.
__ push(edx);
@ -1006,7 +966,7 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Move result to edi and exit the internal frame.
__ mov(edi, eax);
}
__ LeaveInternalFrame();
// Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC.
@ -1151,17 +1111,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ EnterInternalFrame();
__ push(ecx); // save the key
__ push(edx); // pass the receiver
__ push(ecx); // pass the key
__ CallRuntime(Runtime::kKeyedGetProperty, 2);
__ pop(ecx); // restore the key
// Leave the internal frame.
}
__ LeaveInternalFrame();
__ mov(edi, eax);
__ jmp(&do_call);
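The GenerateCallMiss and KeyedCallIC hunks above swap an RAII FrameScope(masm, StackFrame::INTERNAL) for explicit __ EnterInternalFrame() / __ LeaveInternalFrame() pairs. A small sketch of that pattern difference, using invented types rather than V8's:

// Illustration of the pattern only (types invented, not V8's): the newer tree
// bounds an internal frame with an RAII scope, while 3.6.4 pairs explicit
// enter/leave calls by hand, as the hunks above show.
#include <cstdio>

struct Masm {
  void EnterInternalFrame() { std::printf("enter frame\n"); }
  void LeaveInternalFrame() { std::printf("leave frame\n"); }
};

class InternalFrameScope {
 public:
  explicit InternalFrameScope(Masm* masm) : masm_(masm) { masm_->EnterInternalFrame(); }
  ~InternalFrameScope() { masm_->LeaveInternalFrame(); }
 private:
  Masm* masm_;
};

int main() {
  Masm masm;
  {
    InternalFrameScope scope(&masm);                 // newer style: frame bounded by the block
    std::printf("...emit code inside the frame...\n");
  }                                                  // frame left automatically here

  masm.EnterInternalFrame();                         // 3.6.4 style: explicit pairing
  std::printf("...emit code inside the frame...\n");
  masm.LeaveInternalFrame();
  return 0;
}

The RAII form ties frame teardown to block exit; the 3.6.4 form relies on every code path pairing the calls by hand.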

237
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -70,17 +70,6 @@ bool LCodeGen::GenerateCode() {
ASSERT(is_unused());
status_ = GENERATING;
CpuFeatures::Scope scope(SSE2);
CodeStub::GenerateFPStubs();
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
dynamic_frame_alignment_ = chunk()->num_double_slots() > 2 ||
info()->osr_ast_id() != AstNode::kNoNumber;
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
@ -155,29 +144,6 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}
if (dynamic_frame_alignment_) {
Label do_not_pad, align_loop;
STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
// Align esp to a multiple of 2 * kPointerSize.
__ test(esp, Immediate(kPointerSize));
__ j(zero, &do_not_pad, Label::kNear);
__ push(Immediate(0));
__ mov(ebx, esp);
// Copy arguments, receiver, and return address.
__ mov(ecx, Immediate(scope()->num_parameters() + 2));
__ bind(&align_loop);
__ mov(eax, Operand(ebx, 1 * kPointerSize));
__ mov(Operand(ebx, 0), eax);
__ add(Operand(ebx), Immediate(kPointerSize));
__ dec(ecx);
__ j(not_zero, &align_loop, Label::kNear);
__ mov(Operand(ebx, 0),
Immediate(isolate()->factory()->frame_alignment_marker()));
__ bind(&do_not_pad);
}
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
@ -238,12 +204,11 @@ bool LCodeGen::GeneratePrologue() {
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ mov(Operand(esi, context_offset), eax);
// Update the write barrier. This clobbers eax and ebx.
__ RecordWriteContextSlot(esi,
context_offset,
eax,
ebx,
kDontSaveFPRegs);
// Update the write barrier. This clobbers all involved
// registers, so we have to use a third register to avoid
// clobbering esi.
__ mov(ecx, esi);
__ RecordWrite(ecx, context_offset, eax, ebx);
}
}
Comment(";;; End allocate local context");
@ -295,9 +260,6 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
__ jmp(code->exit());
}
@ -519,18 +481,14 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context) {
ASSERT(context->IsRegister() || context->IsStackSlot());
if (context->IsRegister()) {
if (!ToRegister(context).is(esi)) {
__ mov(esi, ToRegister(context));
}
} else if (context->IsStackSlot()) {
__ mov(esi, ToOperand(context));
} else if (context->IsConstantOperand()) {
Handle<Object> literal =
chunk_->LookupLiteral(LConstantOperand::cast(context));
LoadHeapObject(esi, Handle<Context>::cast(literal));
} else {
UNREACHABLE();
// Context is stack slot.
__ mov(esi, ToOperand(context));
}
__ CallRuntimeSaveDoubles(id);
@ -711,7 +669,7 @@ void LCodeGen::RecordSafepoint(
int arguments,
int deoptimization_index) {
ASSERT(kind == expected_safepoint_kind_);
const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
@ -1242,13 +1200,8 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Register reg = ToRegister(instr->result());
Handle<Object> handle = instr->value();
if (handle->IsHeapObject()) {
LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
} else {
__ Set(reg, Immediate(handle));
}
ASSERT(instr->result()->IsRegister());
__ Set(ToRegister(instr->result()), Immediate(instr->value()));
}
@ -1624,33 +1577,23 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
}
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
int false_block = chunk_->LookupDestination(instr->false_block_id());
// If the expression is known to be untagged or a smi, then it's definitely
// not null, and it can't be an undetectable object.
if (instr->hydrogen()->representation().IsSpecialization() ||
instr->hydrogen()->type().IsSmi()) {
EmitGoto(false_block);
return;
}
// TODO(fsc): If the expression is known to be a smi, then it's
// definitely not null. Jump to the false block.
int true_block = chunk_->LookupDestination(instr->true_block_id());
Handle<Object> nil_value = instr->nil() == kNullValue ?
factory()->null_value() :
factory()->undefined_value();
__ cmp(reg, nil_value);
if (instr->kind() == kStrictEquality) {
int false_block = chunk_->LookupDestination(instr->false_block_id());
__ cmp(reg, factory()->null_value());
if (instr->is_strict()) {
EmitBranch(true_block, false_block, equal);
} else {
Handle<Object> other_nil_value = instr->nil() == kNullValue ?
factory()->undefined_value() :
factory()->null_value();
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ j(equal, true_label);
__ cmp(reg, other_nil_value);
__ cmp(reg, factory()->undefined_value());
__ j(equal, true_label);
__ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
@ -1802,36 +1745,28 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
__ JumpIfSmi(input, is_false);
if (class_name->IsEqualTo(CStrVector("Function"))) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
FIRST_SPEC_OBJECT_TYPE + 1);
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
LAST_SPEC_OBJECT_TYPE - 1);
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
__ j(below, is_false);
__ j(equal, is_true);
__ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
__ j(equal, is_true);
// Map is now in temp.
// Functions have class 'Function'.
__ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
__ j(above_equal, is_true);
} else {
// Faster code path to avoid two compares: subtract lower bound from the
// actual type and do a signed compare with the width of the type range.
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
__ mov(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
__ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ cmpb(Operand(temp2),
static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(above, is_false);
__ j(above_equal, is_false);
}
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
// As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
// FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
// LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
if (class_name->IsEqualTo(CStrVector("Object"))) {
@ -1916,8 +1851,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
virtual void Generate() {
codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
}
virtual LInstruction* instr() { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
@ -2055,17 +1991,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
__ mov(esp, ebp);
__ pop(ebp);
if (dynamic_frame_alignment_) {
Label aligned;
// Frame alignment marker (padding) is below arguments,
// and receiver, so its return-address-relative offset is
// (num_arguments + 2) words.
__ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
Immediate(factory()->frame_alignment_marker()));
__ j(not_equal, &aligned);
__ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
__ bind(&aligned);
}
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
}
@ -2073,7 +1998,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
__ mov(result, Operand::Cell(instr->hydrogen()->cell()));
if (instr->hydrogen()->RequiresHoleCheck()) {
if (instr->hydrogen()->check_hole_value()) {
__ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
@ -2094,34 +2019,20 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register object = ToRegister(instr->TempAt(0));
Register address = ToRegister(instr->TempAt(1));
Register value = ToRegister(instr->InputAt(0));
ASSERT(!value.is(object));
Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
int offset = JSGlobalPropertyCell::kValueOffset;
__ mov(object, Immediate(cell_handle));
Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(FieldOperand(object, offset), factory()->the_hole_value());
if (instr->hydrogen()->check_hole_value()) {
__ cmp(cell_operand, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
// Store the value.
__ mov(FieldOperand(object, offset), value);
// Cells are always in the remembered set.
__ RecordWriteField(object,
offset,
value,
address,
kSaveFPRegs,
OMIT_REMEMBERED_SET);
__ mov(cell_operand, value);
}
@ -2152,7 +2063,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->needs_write_barrier()) {
Register temp = ToRegister(instr->TempAt(0));
int offset = Context::SlotOffset(instr->slot_index());
__ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs);
__ RecordWrite(context, offset, value, temp);
}
}
@ -2369,6 +2280,7 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
if (instr->hydrogen()->RequiresHoleCheck()) {
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
@ -2377,6 +2289,7 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
offset);
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr->environment());
}
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@ -2446,7 +2359,6 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@ -2768,7 +2680,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
virtual LInstruction* instr() { return instr_; }
private:
LUnaryMathOperation* instr_;
};
@ -3094,7 +3005,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Drop(1);
}
@ -3151,7 +3062,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) {
Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(object, offset, value, temp, kSaveFPRegs);
__ RecordWrite(object, offset, value, temp);
}
} else {
Register temp = ToRegister(instr->TempAt(0));
@ -3160,7 +3071,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
__ RecordWriteField(temp, offset, value, object, kSaveFPRegs);
__ RecordWrite(temp, offset, value, object);
}
}
}
@ -3219,7 +3130,6 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
@ -3236,13 +3146,6 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
// This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
// conversion, so it deopts in that case.
if (instr->hydrogen()->ValueNeedsSmiCheck()) {
__ test(value, Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
}
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@ -3265,7 +3168,7 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
key,
times_pointer_size,
FixedArray::kHeaderSize));
__ RecordWrite(elements, key, value, kSaveFPRegs);
__ RecordWrite(elements, key, value);
}
}
@ -3309,7 +3212,6 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStringCharCodeAt* instr_;
};
@ -3432,7 +3334,6 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStringCharFromCode* instr_;
};
@ -3512,7 +3413,6 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagI* instr_;
};
@ -3580,7 +3480,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LNumberTagD* instr_;
};
@ -3682,6 +3581,16 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
class DeferredTaggedToI: public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
private:
LTaggedToI* instr_;
};
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label done, heap_number;
Register input_reg = ToRegister(instr->InputAt(0));
@ -3763,16 +3672,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
class DeferredTaggedToI: public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LTaggedToI* instr_;
};
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@ -3983,16 +3882,9 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
Handle<JSFunction> target = instr->hydrogen()->target();
if (isolate()->heap()->InNewSpace(*target)) {
Register reg = ToRegister(instr->value());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(target);
__ cmp(reg, Operand::Cell(cell));
} else {
Operand operand = ToOperand(instr->value());
ASSERT(instr->InputAt(0)->IsRegister());
Operand operand = ToOperand(instr->InputAt(0));
__ cmp(operand, instr->hydrogen()->target());
}
DeoptimizeIf(not_equal, instr->environment());
}
@ -4296,12 +4188,10 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = not_zero;
} else if (type_name->Equals(heap()->function_symbol())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
__ j(equal, true_label);
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
__ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
final_branch_condition = above_equal;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
@ -4413,7 +4303,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LStackCheck* instr_;
};
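The DoIsNilAndBranch / DoIsNullAndBranch hunk above, like EmitLiteralCompareNil earlier in the diff, encodes the same comparison rules: strict equality matches only the named nil value, while the non-strict form also accepts the other nil value (and undetectable objects, which the emitted code checks through the map's bit field). A tiny standalone model of just the nil part, with invented names and the undetectable-object check omitted:

// Sketch of the comparison semantics behind the IsNil/IsNull branches
// (illustrative, not V8 code).
#include <cstdio>

enum class Value { kNull, kUndefined, kSmi, kObject };
enum class Nil { kNullValue, kUndefinedValue };

bool CompareNil(Value v, Nil nil, bool is_strict) {
  Value primary = (nil == Nil::kNullValue) ? Value::kNull : Value::kUndefined;
  if (v == primary) return true;
  if (is_strict) return false;
  Value other = (nil == Nil::kNullValue) ? Value::kUndefined : Value::kNull;
  return v == other;  // the generated code additionally accepts undetectable objects
}

int main() {
  std::printf("%d\n", CompareNil(Value::kUndefined, Nil::kNullValue, false));  // 1: undefined == null
  std::printf("%d\n", CompareNil(Value::kUndefined, Nil::kNullValue, true));   // 0: undefined === null
  return 0;
}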

13
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -58,7 +58,6 @@ class LCodeGen BASE_EMBEDDED {
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
dynamic_frame_alignment_(false),
deferred_(8),
osr_pc_offset_(-1),
deoptimization_reloc_size(),
@ -134,10 +133,6 @@ class LCodeGen BASE_EMBEDDED {
int strict_mode_flag() const {
return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
}
bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; }
void set_dynamic_frame_alignment(bool value) {
dynamic_frame_alignment_ = value;
}
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
@ -302,7 +297,6 @@ class LCodeGen BASE_EMBEDDED {
int inlined_function_count_;
Scope* const scope_;
Status status_;
bool dynamic_frame_alignment_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
@ -352,20 +346,16 @@ class LCodeGen BASE_EMBEDDED {
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
: codegen_(codegen), external_exit_(NULL) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
@ -376,7 +366,6 @@ class LDeferredCode: public ZoneObject {
Label entry_;
Label exit_;
Label* external_exit_;
int instruction_index_;
};
} } // namespace v8::internal
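The LDeferredCode hunks show the other side of the downgrade: the newer base class records which Lithium instruction each deferred block belongs to and forces subclasses to expose it through a pure virtual instr(), so GenerateDeferredCode can emit a ";;; Deferred code @N: mnemonic" comment; 3.6.4's base class has neither member. A rough standalone sketch of that pattern, with invented names rather than V8's classes:

// Not V8 code: a minimal version of the accessor pattern being removed here.
#include <cstdio>
#include <vector>

struct Instruction {
  const char* Mnemonic() const { return "tagged-to-i"; }
};

class DeferredCode {
 public:
  explicit DeferredCode(int instruction_index) : instruction_index_(instruction_index) {}
  virtual ~DeferredCode() {}
  virtual void Generate() = 0;
  virtual Instruction* instr() = 0;  // newer tree: lets the driver name the instruction
  int instruction_index() const { return instruction_index_; }
 private:
  int instruction_index_;
};

class DeferredTaggedToI : public DeferredCode {
 public:
  DeferredTaggedToI(int index, Instruction* instr) : DeferredCode(index), instr_(instr) {}
  virtual void Generate() { std::printf("  ...slow-path code...\n"); }
  virtual Instruction* instr() { return instr_; }
 private:
  Instruction* instr_;
};

int main() {
  Instruction tagged_to_i;
  DeferredTaggedToI deferred(42, &tagged_to_i);
  std::vector<DeferredCode*> deferred_codes(1, &deferred);
  for (size_t i = 0; i < deferred_codes.size(); ++i) {
    // Equivalent of the Comment(";;; Deferred code @%d: %s.") call that the
    // downgrade drops from GenerateDeferredCode().
    std::printf(";;; Deferred code @%d: %s.\n",
                deferred_codes[i]->instruction_index(),
                deferred_codes[i]->instr()->Mnemonic());
    deferred_codes[i]->Generate();
  }
  return 0;
}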

64
deps/v8/src/ia32/lithium-ia32.cc

@ -214,11 +214,10 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
}
void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
stream->Add(kind() == kStrictEquality ? " === " : " == ");
stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(is_strict() ? " === null" : " == null");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@ -352,11 +351,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
int LChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot if for a double-width slot.
if (is_double) {
spill_slot_count_ |= 1; // Make it odd, so incrementing makes it even.
spill_slot_count_++;
num_double_slots_++;
}
if (is_double) spill_slot_count_++;
return spill_slot_count_++;
}
@ -712,9 +707,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
instr->set_environment(CreateEnvironment(hydrogen_env,
&argument_index_accumulator));
instr->set_environment(CreateEnvironment(hydrogen_env));
return instr;
}
@ -1001,13 +994,10 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
int* argument_index_accumulator) {
LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (hydrogen_env == NULL) return NULL;
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@ -1017,6 +1007,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
argument_count_,
value_count,
outer);
int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@ -1025,7 +1016,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
op = new LArgument((*argument_index_accumulator)++);
op = new LArgument(argument_index++);
} else {
op = UseAny(value);
}
@ -1480,10 +1471,10 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
}
LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
// We only need a temp register for non-strict compare.
LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
LOperand* temp = instr->is_strict() ? NULL : TempRegister();
return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
}
@ -1692,13 +1683,7 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
// If the target is in new space, we'll emit a global cell compare and so
// want the value in a register. If the target gets promoted before we
// emit code, we will still get the register but will do an immediate
// compare instead of the cell compare. This is safe.
LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target())
? UseRegisterAtStart(instr->value())
: UseAtStart(instr->value());
LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new LCheckFunction(value));
}
@ -1785,7 +1770,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new LLoadGlobalCell;
return instr->RequiresHoleCheck()
return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
@ -1801,10 +1786,8 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LStoreGlobalCell* result =
new LStoreGlobalCell(UseTempRegister(instr->value()),
TempRegister(),
TempRegister());
return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
new LStoreGlobalCell(UseRegisterAtStart(instr->value()));
return instr->check_hole_value() ? AssignEnvironment(result) : result;
}
@ -1825,13 +1808,15 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LOperand* context;
LOperand* value;
LOperand* temp;
LOperand* context = UseRegister(instr->context());
if (instr->NeedsWriteBarrier()) {
context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value());
temp = TempRegister();
} else {
context = UseRegister(instr->context());
value = UseRegister(instr->value());
temp = NULL;
}
@ -1959,7 +1944,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
ASSERT(instr->object()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* obj = UseRegister(instr->object());
LOperand* obj = UseTempRegister(instr->object());
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
@ -2036,14 +2021,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* obj;
if (needs_write_barrier) {
obj = instr->is_in_object()
? UseRegister(instr->object())
: UseTempRegister(instr->object());
} else {
obj = UseRegisterAtStart(instr->object());
}
LOperand* obj = needs_write_barrier
? UseTempRegister(instr->object())
: UseRegisterAtStart(instr->object());
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())

28
deps/v8/src/ia32/lithium-ia32.h

@ -101,7 +101,7 @@ class LCodeGen;
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
V(IsNullAndBranch) \
V(IsObjectAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@ -615,18 +615,17 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
};
class LIsNilAndBranch: public LControlInstruction<1, 1> {
class LIsNullAndBranch: public LControlInstruction<1, 1> {
public:
LIsNilAndBranch(LOperand* value, LOperand* temp) {
LIsNullAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
EqualityKind kind() const { return hydrogen()->kind(); }
NilValue nil() const { return hydrogen()->nil(); }
bool is_strict() const { return hydrogen()->is_strict(); }
virtual void PrintDataTo(StringStream* stream);
};
@ -1231,12 +1230,10 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
};
class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> {
class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
public:
explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
explicit LStoreGlobalCell(LOperand* value) {
inputs_[0] = value;
temps_[0] = temp1;
temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
@ -1801,8 +1798,6 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
@ -2075,7 +2070,6 @@ class LChunk: public ZoneObject {
graph_(graph),
instructions_(32),
pointer_maps_(8),
num_double_slots_(0),
inlined_closures_(1) { }
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
@ -2089,8 +2083,6 @@ class LChunk: public ZoneObject {
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
int num_double_slots() const { return num_double_slots_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
@ -2132,7 +2124,6 @@ class LChunk: public ZoneObject {
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
int num_double_slots_;
ZoneList<Handle<JSFunction> > inlined_closures_;
};
@ -2268,8 +2259,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
void VisitInstruction(HInstruction* current);

723
deps/v8/src/ia32/macro-assembler-ia32.cc

File diff suppressed because it is too large

218
deps/v8/src/ia32/macro-assembler-ia32.h

@ -29,7 +29,6 @@
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
#include "assembler.h"
#include "frames.h"
#include "v8globals.h"
namespace v8 {
@ -51,13 +50,6 @@ enum AllocationFlags {
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@ -69,130 +61,42 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
enum RememberedSetFinalAction {
kReturnAtEnd,
kFallThroughAtEnd
};
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr,
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
void CheckPageFlag(Register object,
Register scratch,
int mask,
Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
Register scratch,
Label* branch,
Label::Distance distance = Label::kFar) {
InNewSpace(object, scratch, zero, branch, distance);
}
// For page containing |object| mark region covering |addr| dirty.
// RecordWriteHelper only works if the object is not in new
// space.
void RecordWriteHelper(Register object,
Register addr,
Register scratch);
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfInNewSpace(Register object,
// Check if object is in new space.
// scratch can be object itself, but it will be clobbered.
void InNewSpace(Register object,
Register scratch,
Condition cc, // equal for new space, not_equal otherwise.
Label* branch,
Label::Distance distance = Label::kFar) {
InNewSpace(object, scratch, not_zero, branch, distance);
}
Label::Distance branch_near = Label::kFar);
// Check if an object has a given incremental marking color. Also uses ecx!
void HasColor(Register object,
Register scratch0,
Register scratch1,
Label* has_color,
Label::Distance has_color_distance,
int first_bit,
int second_bit);
void JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black,
Label::Distance on_black_distance = Label::kFar);
// Checks the color of an object. If the object is already grey or black
// then we just fall through, since it is already live. If it is white and
// we can determine that it doesn't need to be scanned, then we just mark it
// black and fall through. For the rest we jump to the label so the
// incremental marker can fix its assumptions.
void EnsureNotWhite(Register object,
Register scratch1,
Register scratch2,
Label* object_is_white_and_not_data,
Label::Distance distance);
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
void RecordWriteField(
Register object,
int offset,
Register value,
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
// As above, but the offset has the tag presubtracted. For use with
// Operand(reg, off).
void RecordWriteContextSlot(
Register context,
int offset,
Register value,
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK) {
RecordWriteField(context,
offset + kHeapObjectTag,
value,
scratch,
save_fp,
remembered_set_action,
smi_check);
}
// Notify the garbage collector that we wrote a pointer into a fixed array.
// |array| is the array being stored into, |value| is the
// object being stored. |index| is the array index represented as a
// Smi. All registers are clobbered by the operation RecordWriteArray
// For page containing |object| mark region covering [object+offset]
// dirty. |object| is the object being stored into, |value| is the
// object being stored. If offset is zero, then the scratch register
// contains the array index into the elements array represented as a
// Smi. All registers are clobbered by the operation. RecordWrite
// filters out smis so it does not update the write barrier if the
// value is a smi.
void RecordWriteArray(
Register array,
void RecordWrite(Register object,
int offset,
Register value,
Register index,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
Register scratch);
// For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
// object being stored. The address and value registers are clobbered by the
// object being stored. All registers are clobbered by the
// operation. RecordWrite filters out smis so it does not update the
// write barrier if the value is a smi.
void RecordWrite(
Register object,
void RecordWrite(Register object,
Register address,
Register value,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
Register value);
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
@ -201,6 +105,15 @@ class MacroAssembler: public Assembler {
void DebugBreak();
#endif
// ---------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter specific kind of exit frame. Expects the number of
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
@ -246,15 +159,6 @@ class MacroAssembler: public Assembler {
void SetCallKind(Register dst, CallKind kind);
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
}
void InvokeCode(const Operand& code,
const ParameterCount& expected,
const ParameterCount& actual,
@ -321,29 +225,6 @@ class MacroAssembler: public Assembler {
Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance = Label::kFar);
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiOnlyElements(Register map,
Label* fail,
Label::Distance distance = Label::kFar);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements, otherwise jump to fail.
void StoreNumberToDoubleElements(Register maybe_number,
Register elements,
Register key,
Register scratch1,
XMMRegister scratch2,
Label* fail,
bool specialize_for_processor);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object)
@ -396,7 +277,7 @@ class MacroAssembler: public Assembler {
void SmiTag(Register reg) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
add(reg, reg);
add(reg, Operand(reg));
}
void SmiUntag(Register reg) {
sar(reg, kSmiTagSize);
@ -584,13 +465,6 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
// Initialize fields with filler values. Fields starting at |start_offset|
// not including end_offset are overwritten with the value in |filler|. At
// the end the loop, |start_offset| takes the value of |end_offset|.
void InitializeFieldsWithFiller(Register start_offset,
Register end_offset,
Register filler);
// ---------------------------------------------------------------------------
// Support functions.
@ -793,9 +667,6 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
// ---------------------------------------------------------------------------
// String utilities.
@ -819,14 +690,9 @@ class MacroAssembler: public Assembler {
return SafepointRegisterStackIndex(reg.code());
}
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
private:
bool generating_stub_;
bool allow_stub_calls_;
bool has_frame_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
@ -837,10 +703,14 @@ class MacroAssembler: public Assembler {
const Operand& code_operand,
Label* done,
InvokeFlag flag,
Label::Distance done_distance,
Label::Distance done_near = Label::kFar,
const CallWrapper& call_wrapper = NullCallWrapper(),
CallKind call_kind = CALL_AS_METHOD);
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
void EnterExitFramePrologue();
void EnterExitFrameEpilogue(int argc, bool save_doubles);
@ -859,20 +729,6 @@ class MacroAssembler: public Assembler {
Register scratch,
bool gc_allowed);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Register scratch,
Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
// Helper for finding the mark bits for an address. Afterwards, the
// bitmap register points at the word with the mark bits and the mask
// the position of the first bit. Uses ecx as scratch and leaves addr_reg
// unchanged.
inline void GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg);
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
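The write-barrier section of this header is the largest conceptual change: the newer tree's RecordWriteField / RecordWriteContextSlot / RecordWriteArray take explicit SaveFPRegsMode, RememberedSetAction and SmiCheck parameters (with defaults), while 3.6.4 falls back to a fixed RecordWrite(object, offset, value, scratch) that marks a page region dirty. The stand-ins below are invented, not V8's declarations; they only show how the two call-site shapes compare:

// Invented stand-ins to contrast call-site shapes; the bodies are fake.
#include <cstdio>

struct Register { const char* name; };

enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };

// Newer-style entry point: behaviour is spelled out at the call site,
// with defaults keeping the common case short.
void RecordWriteField(Register object, int offset, Register value, Register scratch,
                      SaveFPRegsMode save_fp,
                      RememberedSetAction action = EMIT_REMEMBERED_SET,
                      SmiCheck smi_check = INLINE_SMI_CHECK) {
  std::printf("barrier %s+%d <- %s (scratch %s, save_fp=%d, action=%d, smi_check=%d)\n",
              object.name, offset, value.name, scratch.name,
              static_cast<int>(save_fp), static_cast<int>(action),
              static_cast<int>(smi_check));
}

// 3.6.4-style entry point: one fixed signature, page-dirtying semantics.
void RecordWrite(Register object, int offset, Register value, Register scratch) {
  std::printf("dirty region %s+%d <- %s (scratch %s)\n",
              object.name, offset, value.name, scratch.name);
}

int main() {
  Register obj = { "edx" }, val = { "eax" }, scratch = { "ecx" };
  RecordWriteField(obj, 8, val, scratch, kSaveFPRegs);           // defaults apply
  RecordWriteField(obj, 8, val, scratch, kDontSaveFPRegs,
                   EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);         // explicit, as in the array-store hunk
  RecordWrite(obj, 8, val, scratch);                             // downgraded form
  return 0;
}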

Some files were not shown because too many files changed in this diff
