
Revert "Upgrade V8 to 3.6.6"

Not stable enough.
- Windows snapshot linking broken
- Linux crash on ./node_g test/simple/test-stream-pipe-multi.js

This reverts commit 56e6952e63.
v0.7.4-release
Ryan Dahl
commit 3b1d656da5
1. deps/v8/ChangeLog (55)
2. deps/v8/Makefile (45)
3. deps/v8/benchmarks/spinning-balls/index.html (11)
4. deps/v8/benchmarks/spinning-balls/splay-tree.js (326)
5. deps/v8/benchmarks/spinning-balls/v.js (387)
6. deps/v8/build/common.gypi (12)
7. deps/v8/build/standalone.gypi (31)
8. deps/v8/include/v8-debug.h (5)
9. deps/v8/include/v8-profiler.h (6)
10. deps/v8/include/v8.h (42)
11. deps/v8/src/SConscript (2)
12. deps/v8/src/api.cc (130)
13. deps/v8/src/arm/assembler-arm-inl.h (24)
14. deps/v8/src/arm/assembler-arm.cc (12)
15. deps/v8/src/arm/assembler-arm.h (10)
16. deps/v8/src/arm/builtins-arm.cc (1118)
17. deps/v8/src/arm/code-stubs-arm.cc (585)
18. deps/v8/src/arm/code-stubs-arm.h (245)
19. deps/v8/src/arm/codegen-arm.cc (8)
20. deps/v8/src/arm/codegen-arm.h (10)
21. deps/v8/src/arm/debug-arm.cc (82)
22. deps/v8/src/arm/deoptimizer-arm.cc (34)
23. deps/v8/src/arm/frames-arm.h (10)
24. deps/v8/src/arm/full-codegen-arm.cc (195)
25. deps/v8/src/arm/ic-arm.cc (149)
26. deps/v8/src/arm/lithium-arm.cc (38)
27. deps/v8/src/arm/lithium-arm.h (16)
28. deps/v8/src/arm/lithium-codegen-arm.cc (182)
29. deps/v8/src/arm/lithium-codegen-arm.h (7)
30. deps/v8/src/arm/macro-assembler-arm.cc (566)
31. deps/v8/src/arm/macro-assembler-arm.h (226)
32. deps/v8/src/arm/regexp-macro-assembler-arm.cc (15)
33. deps/v8/src/arm/simulator-arm.cc (2)
34. deps/v8/src/arm/stub-cache-arm.cc (324)
35. deps/v8/src/array.js (151)
36. deps/v8/src/assembler.cc (56)
37. deps/v8/src/assembler.h (27)
38. deps/v8/src/ast.cc (154)
39. deps/v8/src/ast.h (29)
40. deps/v8/src/bootstrapper.cc (57)
41. deps/v8/src/builtins.cc (146)
42. deps/v8/src/cached-powers.cc (12)
43. deps/v8/src/code-stubs.cc (45)
44. deps/v8/src/code-stubs.h (117)
45. deps/v8/src/codegen.cc (2)
46. deps/v8/src/compiler-intrinsics.h (77)
47. deps/v8/src/compiler.cc (3)
48. deps/v8/src/contexts.cc (100)
49. deps/v8/src/contexts.h (41)
50. deps/v8/src/conversions-inl.h (2)
51. deps/v8/src/conversions.h (2)
52. deps/v8/src/cpu-profiler.cc (2)
53. deps/v8/src/d8-debug.cc (5)
54. deps/v8/src/d8.cc (34)
55. deps/v8/src/debug.cc (226)
56. deps/v8/src/debug.h (90)
57. deps/v8/src/deoptimizer.cc (70)
58. deps/v8/src/deoptimizer.h (18)
59. deps/v8/src/disassembler.cc (2)
60. deps/v8/src/elements.cc (11)
61. deps/v8/src/execution.cc (175)
62. deps/v8/src/execution.h (13)
63. deps/v8/src/extensions/gc-extension.cc (7)
64. deps/v8/src/factory.cc (96)
65. deps/v8/src/factory.h (32)
66. deps/v8/src/flag-definitions.h (24)
67. deps/v8/src/frames-inl.h (67)
68. deps/v8/src/frames.cc (117)
69. deps/v8/src/frames.h (78)
70. deps/v8/src/full-codegen.cc (41)
71. deps/v8/src/full-codegen.h (23)
72. deps/v8/src/func-name-inferrer.h (6)
73. deps/v8/src/globals.h (4)
74. deps/v8/src/handles.cc (13)
75. deps/v8/src/handles.h (15)
76. deps/v8/src/heap-inl.h (104)
77. deps/v8/src/heap-profiler.cc (1)
78. deps/v8/src/heap.cc (1531)
79. deps/v8/src/heap.h (458)
80. deps/v8/src/hydrogen-instructions.cc (66)
81. deps/v8/src/hydrogen-instructions.h (344)
82. deps/v8/src/hydrogen.cc (408)
83. deps/v8/src/hydrogen.h (27)
84. deps/v8/src/ia32/assembler-ia32-inl.h (26)
85. deps/v8/src/ia32/assembler-ia32.cc (87)
86. deps/v8/src/ia32/assembler-ia32.h (88)
87. deps/v8/src/ia32/builtins-ia32.cc (1031)
88. deps/v8/src/ia32/code-stubs-ia32.cc (1112)
89. deps/v8/src/ia32/code-stubs-ia32.h (291)
90. deps/v8/src/ia32/codegen-ia32.cc (46)
91. deps/v8/src/ia32/debug-ia32.cc (95)
92. deps/v8/src/ia32/deoptimizer-ia32.cc (100)
93. deps/v8/src/ia32/disasm-ia32.cc (29)
94. deps/v8/src/ia32/full-codegen-ia32.cc (323)
95. deps/v8/src/ia32/ic-ia32.cc (160)
96. deps/v8/src/ia32/lithium-codegen-ia32.cc (255)
97. deps/v8/src/ia32/lithium-codegen-ia32.h (13)
98. deps/v8/src/ia32/lithium-ia32.cc (64)
99. deps/v8/src/ia32/lithium-ia32.h (28)
100. deps/v8/src/ia32/macro-assembler-ia32.cc (737)

deps/v8/ChangeLog (55)

@ -1,58 +1,3 @@
2011-10-10: Version 3.6.6
Added a GC pause visualization tool.
Added presubmit=no and werror=no flags to Makefile.
ES5/Test262 conformance improvements.
Fixed compilation issues with GCC 4.5.x (issue 1743).
Bug fixes and performance improvements on all platforms.
2011-10-05: Version 3.6.5
New incremental garbage collector.
Removed the hard heap size limit (soft heap size limit is still
700/1400Mbytes by default).
Implemented ES5 generic Array.prototype.toString (Issue 1361).
V8 now allows surrogate pair codes in decodeURIComponent (Issue 1415).
Fixed x64 RegExp start-of-string bug (Issues 1746, 1748).
Fixed propertyIsEnumerable for numeric properties (Issue 1692).
Fixed the MinGW and Windows 2000 builds.
Fixed "Prototype chain is not searched if named property handler does
not set a property" (Issue 1636).
Made the RegExp.prototype object be a RegExp object (Issue 1217).
Disallowed future reserved words as labels in strict mode.
Fixed string split to correctly coerce the separator to a string
(Issue 1711).
API: Added an optional source length field to the Extension
constructor.
API: Added Debug::DisableAgent to match existing Debug::EnableAgent
(Issue 1573).
Added "native" target to Makefile for the benefit of Linux distros.
Fixed: debugger stops stepping outside evaluate (Issue 1639).
More work on ES-Harmony proxies. Still hidden behind a flag.
Bug fixes and performance improvements on all platforms.
2011-09-15: Version 3.6.4
Fixed d8's broken readline history.
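As a point of reference for the 3.6.5 note above about ES5 generic Array.prototype.toString: this is plain ES5 behavior rather than anything specific to this commit, and the snippet below is only an illustrative sketch. In ES5 the method calls this.join when it is callable and otherwise falls back to Object.prototype.toString, so it also works on array-like objects.

// Illustrative only: ES5 generic Array.prototype.toString on an array-like.
var arrayLike = { 0: 'a', 1: 'b', length: 2, join: Array.prototype.join };
console.log(Array.prototype.toString.call(arrayLike));   // "a,b"
console.log(Array.prototype.toString.call({ foo: 1 }));   // "[object Object]"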

deps/v8/Makefile (45)

@ -32,7 +32,6 @@ LINK ?= "g++"
OUTDIR ?= out OUTDIR ?= out
TESTJOBS ?= -j16 TESTJOBS ?= -j16
GYPFLAGS ?= GYPFLAGS ?=
TESTFLAGS ?=
# Special build flags. Use them like this: "make library=shared" # Special build flags. Use them like this: "make library=shared"
@ -51,10 +50,6 @@ endif
ifeq ($(disassembler), on) ifeq ($(disassembler), on)
GYPFLAGS += -Dv8_enable_disassembler=1 GYPFLAGS += -Dv8_enable_disassembler=1
endif endif
# objectprint=on
ifeq ($(objectprint), on)
GYPFLAGS += -Dv8_object_print=1
endif
# snapshot=off # snapshot=off
ifeq ($(snapshot), off) ifeq ($(snapshot), off)
GYPFLAGS += -Dv8_use_snapshot='false' GYPFLAGS += -Dv8_use_snapshot='false'
@ -77,21 +72,12 @@ endif
ifdef soname_version ifdef soname_version
GYPFLAGS += -Dsoname_version=$(soname_version) GYPFLAGS += -Dsoname_version=$(soname_version)
endif endif
# werror=no
ifeq ($(werror), no)
GYPFLAGS += -Dwerror=''
endif
# presubmit=no
ifeq ($(presubmit), no)
TESTFLAGS += --no-presubmit
endif
# ----------------- available targets: -------------------- # ----------------- available targets: --------------------
# - "dependencies": pulls in external dependencies (currently: GYP) # - "dependencies": pulls in external dependencies (currently: GYP)
# - any arch listed in ARCHES (see below) # - any arch listed in ARCHES (see below)
# - any mode listed in MODES # - any mode listed in MODES
# - every combination <arch>.<mode>, e.g. "ia32.release" # - every combination <arch>.<mode>, e.g. "ia32.release"
# - "native": current host's architecture, release mode
# - any of the above with .check appended, e.g. "ia32.release.check" # - any of the above with .check appended, e.g. "ia32.release.check"
# - default (no target specified): build all ARCHES and MODES # - default (no target specified): build all ARCHES and MODES
# - "check": build all targets and run all tests # - "check": build all targets and run all tests
@ -117,7 +103,7 @@ CHECKS = $(addsuffix .check,$(BUILDS))
# File where previously used GYPFLAGS are stored. # File where previously used GYPFLAGS are stored.
ENVFILE = $(OUTDIR)/environment ENVFILE = $(OUTDIR)/environment
.PHONY: all check clean dependencies $(ENVFILE).new native \ .PHONY: all check clean dependencies $(ENVFILE).new \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \ $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES))
@ -138,31 +124,21 @@ $(BUILDS): $(OUTDIR)/Makefile-$$(basename $$@)
python -c "print raw_input().capitalize()") \ python -c "print raw_input().capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@" builddir="$(shell pwd)/$(OUTDIR)/$@"
native: $(OUTDIR)/Makefile-native
@$(MAKE) -C "$(OUTDIR)" -f Makefile-native \
CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
builddir="$(shell pwd)/$(OUTDIR)/$@"
# Test targets. # Test targets.
check: all check: all
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)
$(TESTFLAGS)
$(addsuffix .check,$(MODES)): $$(basename $$@) $(addsuffix .check,$(MODES)): $$(basename $$@)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
--mode=$(basename $@) $(TESTFLAGS) --mode=$(basename $@)
$(addsuffix .check,$(ARCHES)): $$(basename $$@) $(addsuffix .check,$(ARCHES)): $$(basename $$@)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch=$(basename $@) $(TESTFLAGS) --arch=$(basename $@)
$(CHECKS): $$(basename $$@) $(CHECKS): $$(basename $$@)
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \ @tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) $(TESTFLAGS) --arch-and-mode=$(basename $@)
native.check: native
@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
# Clean targets. You can clean each architecture individually, or everything. # Clean targets. You can clean each architecture individually, or everything.
$(addsuffix .clean,$(ARCHES)): $(addsuffix .clean,$(ARCHES)):
@ -171,12 +147,7 @@ $(addsuffix .clean,$(ARCHES)):
rm -rf $(OUTDIR)/$(basename $@).debug rm -rf $(OUTDIR)/$(basename $@).debug
find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete
native.clean: clean: $(addsuffix .clean,$(ARCHES))
rm -f $(OUTDIR)/Makefile-native
rm -rf $(OUTDIR)/native
find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete
clean: $(addsuffix .clean,$(ARCHES)) native.clean
# GYP file generation targets. # GYP file generation targets.
$(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE) $(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE)
@ -194,10 +165,6 @@ $(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE)
-Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \ -Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
-S-arm $(GYPFLAGS) -S-arm $(GYPFLAGS)
$(OUTDIR)/Makefile-native: $(GYPFILES) $(ENVFILE)
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS)
# Replaces the old with the new environment file if they're different, which # Replaces the old with the new environment file if they're different, which
# will trigger GYP to regenerate Makefiles. # will trigger GYP to regenerate Makefiles.
$(ENVFILE): $(ENVFILE).new $(ENVFILE): $(ENVFILE).new

deps/v8/benchmarks/spinning-balls/index.html (11)

@ -1,11 +0,0 @@
<html>
<head>
<style>
body { text-align: center; }
</style>
</head>
<body>
<script type="text/javascript" src="splay-tree.js"></script>
<script type="text/javascript" src="v.js"></script>
</body>
</html>

deps/v8/benchmarks/spinning-balls/splay-tree.js (326)

@ -1,326 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/**
* Constructs a Splay tree. A splay tree is a self-balancing binary
* search tree with the additional property that recently accessed
* elements are quick to access again. It performs basic operations
* such as insertion, look-up and removal in O(log(n)) amortized time.
*
* @constructor
*/
function SplayTree() {
};
/**
* Pointer to the root node of the tree.
*
* @type {SplayTree.Node}
* @private
*/
SplayTree.prototype.root_ = null;
/**
* @return {boolean} Whether the tree is empty.
*/
SplayTree.prototype.isEmpty = function() {
return !this.root_;
};
/**
* Inserts a node into the tree with the specified key and value if
* the tree does not already contain a node with the specified key. If
* the value is inserted, it becomes the root of the tree.
*
* @param {number} key Key to insert into the tree.
* @param {*} value Value to insert into the tree.
*/
SplayTree.prototype.insert = function(key, value) {
if (this.isEmpty()) {
this.root_ = new SplayTree.Node(key, value);
return;
}
// Splay on the key to move the last node on the search path for
// the key to the root of the tree.
this.splay_(key);
if (this.root_.key == key) {
return;
}
var node = new SplayTree.Node(key, value);
if (key > this.root_.key) {
node.left = this.root_;
node.right = this.root_.right;
this.root_.right = null;
} else {
node.right = this.root_;
node.left = this.root_.left;
this.root_.left = null;
}
this.root_ = node;
};
/**
* Removes a node with the specified key from the tree if the tree
* contains a node with this key. The removed node is returned. If the
* key is not found, an exception is thrown.
*
* @param {number} key Key to find and remove from the tree.
* @return {SplayTree.Node} The removed node.
*/
SplayTree.prototype.remove = function(key) {
if (this.isEmpty()) {
throw Error('Key not found: ' + key);
}
this.splay_(key);
if (this.root_.key != key) {
throw Error('Key not found: ' + key);
}
var removed = this.root_;
if (!this.root_.left) {
this.root_ = this.root_.right;
} else {
var right = this.root_.right;
this.root_ = this.root_.left;
// Splay to make sure that the new root has an empty right child.
this.splay_(key);
// Insert the original right child as the right child of the new
// root.
this.root_.right = right;
}
return removed;
};
/**
* Returns the node having the specified key or null if the tree doesn't contain
* a node with the specified key.
*
* @param {number} key Key to find in the tree.
* @return {SplayTree.Node} Node having the specified key.
*/
SplayTree.prototype.find = function(key) {
if (this.isEmpty()) {
return null;
}
this.splay_(key);
return this.root_.key == key ? this.root_ : null;
};
/**
* @return {SplayTree.Node} Node having the maximum key value.
*/
SplayTree.prototype.findMax = function(opt_startNode) {
if (this.isEmpty()) {
return null;
}
var current = opt_startNode || this.root_;
while (current.right) {
current = current.right;
}
return current;
};
/**
* @return {SplayTree.Node} Node having the maximum key value that
* is less than the specified key value.
*/
SplayTree.prototype.findGreatestLessThan = function(key) {
if (this.isEmpty()) {
return null;
}
// Splay on the key to move the node with the given key or the last
// node on the search path to the top of the tree.
this.splay_(key);
// Now the result is either the root node or the greatest node in
// the left subtree.
if (this.root_.key < key) {
return this.root_;
} else if (this.root_.left) {
return this.findMax(this.root_.left);
} else {
return null;
}
};
/**
* @return {Array<*>} An array containing all the keys of tree's nodes.
*/
SplayTree.prototype.exportKeys = function() {
var result = [];
if (!this.isEmpty()) {
this.root_.traverse_(function(node) { result.push(node.key); });
}
return result;
};
/**
* Perform the splay operation for the given key. Moves the node with
* the given key to the top of the tree. If no node has the given
* key, the last node on the search path is moved to the top of the
* tree. This is the simplified top-down splaying algorithm from:
* "Self-adjusting Binary Search Trees" by Sleator and Tarjan
*
* @param {number} key Key to splay the tree on.
* @private
*/
SplayTree.prototype.splay_ = function(key) {
if (this.isEmpty()) {
return;
}
// Create a dummy node. The use of the dummy node is a bit
// counter-intuitive: The right child of the dummy node will hold
// the L tree of the algorithm. The left child of the dummy node
// will hold the R tree of the algorithm. Using a dummy node, left
// and right will always be nodes and we avoid special cases.
var dummy, left, right;
dummy = left = right = new SplayTree.Node(null, null);
var current = this.root_;
while (true) {
if (key < current.key) {
if (!current.left) {
break;
}
if (key < current.left.key) {
// Rotate right.
var tmp = current.left;
current.left = tmp.right;
tmp.right = current;
current = tmp;
if (!current.left) {
break;
}
}
// Link right.
right.left = current;
right = current;
current = current.left;
} else if (key > current.key) {
if (!current.right) {
break;
}
if (key > current.right.key) {
// Rotate left.
var tmp = current.right;
current.right = tmp.left;
tmp.left = current;
current = tmp;
if (!current.right) {
break;
}
}
// Link left.
left.right = current;
left = current;
current = current.right;
} else {
break;
}
}
// Assemble.
left.right = current.left;
right.left = current.right;
current.left = dummy.right;
current.right = dummy.left;
this.root_ = current;
};
/**
* Constructs a Splay tree node.
*
* @param {number} key Key.
* @param {*} value Value.
*/
SplayTree.Node = function(key, value) {
this.key = key;
this.value = value;
};
/**
* @type {SplayTree.Node}
*/
SplayTree.Node.prototype.left = null;
/**
* @type {SplayTree.Node}
*/
SplayTree.Node.prototype.right = null;
/**
* Performs an ordered traversal of the subtree starting at
* this SplayTree.Node.
*
* @param {function(SplayTree.Node)} f Visitor function.
* @private
*/
SplayTree.Node.prototype.traverse_ = function(f) {
var current = this;
while (current) {
var left = current.left;
if (left) left.traverse_(f);
f(current);
current = current.right;
}
};
SplayTree.prototype.traverseBreadthFirst = function (f) {
if (f(this.root_.value)) return;
var stack = [this.root_];
var length = 1;
while (length > 0) {
var new_stack = new Array(stack.length * 2);
var new_length = 0;
for (var i = 0; i < length; i++) {
var n = stack[i];
var l = n.left;
var r = n.right;
if (l) {
if (f(l.value)) return;
new_stack[new_length++] = l;
}
if (r) {
if (f(r.value)) return;
new_stack[new_length++] = r;
}
}
stack = new_stack;
length = new_length;
}
};
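The file above is removed wholesale by this revert and is self-contained; for readers skimming the diff, here is a minimal usage sketch of the SplayTree API it defines (insert, find, findGreatestLessThan, remove, exportKeys), with made-up keys and values.

// Minimal sketch of the SplayTree defined in the removed file above.
var tree = new SplayTree();
tree.insert(5, 'five');
tree.insert(3, 'three');
tree.insert(8, 'eight');

console.log(tree.find(3).value);                  // 'three'
console.log(tree.findGreatestLessThan(8).value);  // 'five' (greatest key below 8)
console.log(tree.exportKeys());                   // [3, 5, 8] (in-order traversal)

tree.remove(5);                                   // returns the removed node
console.log(tree.find(5));                        // null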

deps/v8/benchmarks/spinning-balls/v.js (387)

@ -1,387 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/**
* This function provides requestAnimationFrame in a cross browser way.
* http://paulirish.com/2011/requestanimationframe-for-smart-animating/
*/
if ( !window.requestAnimationFrame ) {
window.requestAnimationFrame = ( function() {
return window.webkitRequestAnimationFrame ||
window.mozRequestAnimationFrame ||
window.oRequestAnimationFrame ||
window.msRequestAnimationFrame ||
function(callback, element) {
window.setTimeout( callback, 1000 / 60 );
};
} )();
}
var kNPoints = 8000;
var kNModifications = 20;
var kNVisiblePoints = 200;
var kDecaySpeed = 20;
var kPointRadius = 4;
var kInitialLifeForce = 100;
var livePoints = void 0;
var dyingPoints = void 0;
var scene = void 0;
var renderingStartTime = void 0;
var scene = void 0;
var pausePlot = void 0;
var splayTree = void 0;
function Point(x, y, z, payload) {
this.x = x;
this.y = y;
this.z = z;
this.next = null;
this.prev = null;
this.payload = payload;
this.lifeForce = kInitialLifeForce;
}
Point.prototype.color = function () {
return "rgba(0, 0, 0, " + (this.lifeForce / kInitialLifeForce) + ")";
};
Point.prototype.decay = function () {
this.lifeForce -= kDecaySpeed;
return this.lifeForce <= 0;
};
function PointsList() {
this.head = null;
this.count = 0;
}
PointsList.prototype.add = function (point) {
if (this.head !== null) this.head.prev = point;
point.next = this.head;
this.head = point;
this.count++;
}
PointsList.prototype.remove = function (point) {
if (point.next !== null) {
point.next.prev = point.prev;
}
if (point.prev !== null) {
point.prev.next = point.next;
} else {
this.head = point.next;
}
this.count--;
}
function GeneratePayloadTree(depth, tag) {
if (depth == 0) {
return {
array : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ],
string : 'String for key ' + tag + ' in leaf node'
};
} else {
return {
left: GeneratePayloadTree(depth - 1, tag),
right: GeneratePayloadTree(depth - 1, tag)
};
}
}
// To make the benchmark results predictable, we replace Math.random
// with a 100% deterministic alternative.
Math.random = (function() {
var seed = 49734321;
return function() {
// Robert Jenkins' 32 bit integer hash function.
seed = ((seed + 0x7ed55d16) + (seed << 12)) & 0xffffffff;
seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
seed = ((seed + 0x165667b1) + (seed << 5)) & 0xffffffff;
seed = ((seed + 0xd3a2646c) ^ (seed << 9)) & 0xffffffff;
seed = ((seed + 0xfd7046c5) + (seed << 3)) & 0xffffffff;
seed = ((seed ^ 0xb55a4f09) ^ (seed >>> 16)) & 0xffffffff;
return (seed & 0xfffffff) / 0x10000000;
};
})();
function GenerateKey() {
// The benchmark framework guarantees that Math.random is
// deterministic; see base.js.
return Math.random();
}
function CreateNewPoint() {
// Insert new node with a unique key.
var key;
do { key = GenerateKey(); } while (splayTree.find(key) != null);
var point = new Point(Math.random() * 40 - 20,
Math.random() * 40 - 20,
Math.random() * 40 - 20,
GeneratePayloadTree(5, "" + key));
livePoints.add(point);
splayTree.insert(key, point);
return key;
}
function ModifyPointsSet() {
if (livePoints.count < kNPoints) {
for (var i = 0; i < kNModifications; i++) {
CreateNewPoint();
}
} else if (kNModifications === 20) {
kNModifications = 80;
kDecay = 30;
}
for (var i = 0; i < kNModifications; i++) {
var key = CreateNewPoint();
var greatest = splayTree.findGreatestLessThan(key);
if (greatest == null) {
var point = splayTree.remove(key).value;
} else {
var point = splayTree.remove(greatest.key).value;
}
livePoints.remove(point);
point.payload = null;
dyingPoints.add(point);
}
}
function PausePlot(width, height, size) {
var canvas = document.createElement("canvas");
canvas.width = this.width = width;
canvas.height = this.height = height;
document.body.appendChild(canvas);
this.ctx = canvas.getContext('2d');
this.maxPause = 0;
this.size = size;
// Initialize cyclic buffer for pauses.
this.pauses = new Array(this.size);
this.start = this.size;
this.idx = 0;
}
PausePlot.prototype.addPause = function (p) {
if (this.idx === this.size) {
this.idx = 0;
}
if (this.idx === this.start) {
this.start++;
}
if (this.start === this.size) {
this.start = 0;
}
this.pauses[this.idx++] = p;
};
PausePlot.prototype.iteratePauses = function (f) {
if (this.start < this.idx) {
for (var i = this.start; i < this.idx; i++) {
f.call(this, i - this.start, this.pauses[i]);
}
} else {
for (var i = this.start; i < this.size; i++) {
f.call(this, i - this.start, this.pauses[i]);
}
var offs = this.size - this.start;
for (var i = 0; i < this.idx; i++) {
f.call(this, i + offs, this.pauses[i]);
}
}
};
PausePlot.prototype.draw = function () {
var first = null;
this.iteratePauses(function (i, v) {
if (first === null) {
first = v;
}
this.maxPause = Math.max(v, this.maxPause);
});
var dx = this.width / this.size;
var dy = this.height / this.maxPause;
this.ctx.save();
this.ctx.clearRect(0, 0, 480, 240);
this.ctx.beginPath();
this.ctx.moveTo(1, dy * this.pauses[this.start]);
var p = first;
this.iteratePauses(function (i, v) {
var delta = v - p;
var x = 1 + dx * i;
var y = dy * v;
this.ctx.lineTo(x, y);
if (delta > 2 * (p / 3)) {
this.ctx.font = "bold 12px sans-serif";
this.ctx.textBaseline = "bottom";
this.ctx.fillText(v + "ms", x + 2, y);
}
p = v;
});
this.ctx.strokeStyle = "black";
this.ctx.stroke();
this.ctx.restore();
}
function Scene(width, height) {
var canvas = document.createElement("canvas");
canvas.width = width;
canvas.height = height;
document.body.appendChild(canvas);
this.ctx = canvas.getContext('2d');
this.width = canvas.width;
this.height = canvas.height;
// Projection configuration.
this.x0 = canvas.width / 2;
this.y0 = canvas.height / 2;
this.z0 = 100;
this.f = 1000; // Focal length.
// Camera is rotating around y-axis.
this.angle = 0;
}
Scene.prototype.drawPoint = function (x, y, z, color) {
// Rotate the camera around y-axis.
var rx = x * Math.cos(this.angle) - z * Math.sin(this.angle);
var ry = y;
var rz = x * Math.sin(this.angle) + z * Math.cos(this.angle);
// Perform perspective projection.
var px = (this.f * rx) / (rz - this.z0) + this.x0;
var py = (this.f * ry) / (rz - this.z0) + this.y0;
this.ctx.save();
this.ctx.fillStyle = color
this.ctx.beginPath();
this.ctx.arc(px, py, kPointRadius, 0, 2 * Math.PI, true);
this.ctx.fill();
this.ctx.restore();
};
Scene.prototype.drawDyingPoints = function () {
var point_next = null;
for (var point = dyingPoints.head; point !== null; point = point_next) {
// Rotate the scene around y-axis.
scene.drawPoint(point.x, point.y, point.z, point.color());
point_next = point.next;
// Decay the current point and remove it from the list
// if it's life-force ran out.
if (point.decay()) {
dyingPoints.remove(point);
}
}
};
Scene.prototype.draw = function () {
this.ctx.save();
this.ctx.clearRect(0, 0, this.width, this.height);
this.drawDyingPoints();
this.ctx.restore();
this.angle += Math.PI / 90.0;
};
function render() {
if (typeof renderingStartTime === 'undefined') {
renderingStartTime = Date.now();
}
ModifyPointsSet();
scene.draw();
var renderingEndTime = Date.now();
var pause = renderingEndTime - renderingStartTime;
pausePlot.addPause(pause);
renderingStartTime = renderingEndTime;
pausePlot.draw();
div.innerHTML =
livePoints.count + "/" + dyingPoints.count + " " +
pause + "(max = " + pausePlot.maxPause + ") ms" ;
// Schedule next frame.
requestAnimationFrame(render);
}
function init() {
livePoints = new PointsList;
dyingPoints = new PointsList;
splayTree = new SplayTree();
scene = new Scene(640, 480);
div = document.createElement("div");
document.body.appendChild(div);
pausePlot = new PausePlot(480, 240, 160);
}
init();
render();
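One detail worth calling out from the benchmark above: it overwrites Math.random with a seeded generator so runs are reproducible. Below is a standalone sketch of that same generator (Robert Jenkins' 32-bit integer hash), factored into a factory purely for illustration; the factory name is not part of the original file.

// Same mixing steps as the Math.random replacement above, wrapped in a
// factory so two generators with the same seed can be compared.
function makeDeterministicRandom(seed) {
  return function() {
    seed = ((seed + 0x7ed55d16) + (seed << 12))  & 0xffffffff;
    seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
    seed = ((seed + 0x165667b1) + (seed << 5))   & 0xffffffff;
    seed = ((seed + 0xd3a2646c) ^ (seed << 9))   & 0xffffffff;
    seed = ((seed + 0xfd7046c5) + (seed << 3))   & 0xffffffff;
    seed = ((seed ^ 0xb55a4f09) ^ (seed >>> 16)) & 0xffffffff;
    return (seed & 0xfffffff) / 0x10000000;       // always in [0, 1)
  };
}

var a = makeDeterministicRandom(49734321);
var b = makeDeterministicRandom(49734321);
console.log(a() === b());  // true: identical seeds give identical sequences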

deps/v8/build/common.gypi (12)

@ -60,8 +60,6 @@
'v8_enable_disassembler%': 0, 'v8_enable_disassembler%': 0,
'v8_object_print%': 0,
'v8_enable_gdbjit%': 0, 'v8_enable_gdbjit%': 0,
# Enable profiling support. Only required on Windows. # Enable profiling support. Only required on Windows.
@ -74,7 +72,6 @@
'v8_use_snapshot%': 'true', 'v8_use_snapshot%': 'true',
'host_os%': '<(OS)', 'host_os%': '<(OS)',
'v8_use_liveobjectlist%': 'false', 'v8_use_liveobjectlist%': 'false',
'werror%': '-Werror',
# For a shared library build, results in "libv8-<(soname_version).so". # For a shared library build, results in "libv8-<(soname_version).so".
'soname_version%': '', 'soname_version%': '',
@ -87,9 +84,6 @@
['v8_enable_disassembler==1', { ['v8_enable_disassembler==1', {
'defines': ['ENABLE_DISASSEMBLER',], 'defines': ['ENABLE_DISASSEMBLER',],
}], }],
['v8_object_print==1', {
'defines': ['OBJECT_PRINT',],
}],
['v8_enable_gdbjit==1', { ['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',], 'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}], }],
@ -191,7 +185,7 @@
], ],
}], }],
['OS=="solaris"', { ['OS=="solaris"', {
'defines': [ '__C99FEATURES__=1' ], # isinf() etc. 'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
}], }],
], ],
'configurations': { 'configurations': {
@ -227,7 +221,7 @@
'cflags': [ '-I/usr/local/include' ], 'cflags': [ '-I/usr/local/include' ],
}], }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd"', { ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', 'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor' ], '-Wnon-virtual-dtor' ],
}], }],
], ],
@ -270,7 +264,7 @@
}], }],
['OS=="win"', { ['OS=="win"', {
'msvs_configuration_attributes': { 'msvs_configuration_attributes': {
'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)', 'OutputDirectory': '$(SolutionDir)$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)', 'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1', 'CharacterSet': '1',
}, },

deps/v8/build/standalone.gypi (31)

@ -35,30 +35,25 @@
'msvs_multi_core_compile%': '1', 'msvs_multi_core_compile%': '1',
'variables': { 'variables': {
'variables': { 'variables': {
'variables': { 'conditions': [
'conditions': [ [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
[ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', { # This handles the Linux platforms we generally deal with. Anything
# This handles the Linux platforms we generally deal with. Anything # else gets passed through, which probably won't work very well; such
# else gets passed through, which probably won't work very well; such # hosts should pass an explicit target_arch to gyp.
# hosts should pass an explicit target_arch to gyp. 'host_arch%':
'host_arch%': '<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")',
'<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")', }, { # OS!="linux" and OS!="freebsd" and OS!="openbsd"
}, { # OS!="linux" and OS!="freebsd" and OS!="openbsd" 'host_arch%': 'ia32',
'host_arch%': 'ia32', }],
}], ],
],
},
'host_arch%': '<(host_arch)',
'target_arch%': '<(host_arch)',
}, },
'host_arch%': '<(host_arch)', 'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)', 'target_arch%': '<(host_arch)',
'v8_target_arch%': '<(target_arch)', 'v8_target_arch%': '<(target_arch)',
}, },
'host_arch%': '<(host_arch)', 'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)', 'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(v8_target_arch)', 'v8_target_arch%': '<(v8_target_arch)',
'werror%': '-Werror',
'conditions': [ 'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \ ['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="x64" and host_arch!="x64")', { (v8_target_arch=="x64" and host_arch!="x64")', {
@ -79,7 +74,7 @@
'conditions': [ 'conditions': [
[ 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', { [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
'target_defaults': { 'target_defaults': {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', 'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-pthread', '-fno-rtti', '-Wnon-virtual-dtor', '-pthread', '-fno-rtti',
'-fno-exceptions', '-pedantic' ], '-fno-exceptions', '-pedantic' ],
'ldflags': [ '-pthread', ], 'ldflags': [ '-pthread', ],

deps/v8/include/v8-debug.h (5)

@ -339,11 +339,6 @@ class EXPORT Debug {
static bool EnableAgent(const char* name, int port, static bool EnableAgent(const char* name, int port,
bool wait_for_connection = false); bool wait_for_connection = false);
/**
* Disable the V8 builtin debug agent. The TCP/IP connection will be closed.
*/
static void DisableAgent();
/** /**
* Makes V8 process all pending debug messages. * Makes V8 process all pending debug messages.
* *

deps/v8/include/v8-profiler.h (6)

@ -307,12 +307,6 @@ class V8EXPORT HeapGraphNode {
* path from the snapshot root to the current node. * path from the snapshot root to the current node.
*/ */
const HeapGraphNode* GetDominatorNode() const; const HeapGraphNode* GetDominatorNode() const;
/**
* Finds and returns a value from the heap corresponding to this node,
* if the value is still reachable.
*/
Handle<Value> GetHeapValue() const;
}; };

deps/v8/include/v8.h (42)

@ -1171,8 +1171,7 @@ class String : public Primitive {
* Get the ExternalAsciiStringResource for an external ASCII string. * Get the ExternalAsciiStringResource for an external ASCII string.
* Returns NULL if IsExternalAscii() doesn't return true. * Returns NULL if IsExternalAscii() doesn't return true.
*/ */
V8EXPORT const ExternalAsciiStringResource* GetExternalAsciiStringResource() V8EXPORT ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
const;
static inline String* Cast(v8::Value* obj); static inline String* Cast(v8::Value* obj);
@ -2452,42 +2451,24 @@ class V8EXPORT TypeSwitch : public Data {
// --- Extensions --- // --- Extensions ---
class V8EXPORT ExternalAsciiStringResourceImpl
: public String::ExternalAsciiStringResource {
public:
ExternalAsciiStringResourceImpl() : data_(0), length_(0) {}
ExternalAsciiStringResourceImpl(const char* data, size_t length)
: data_(data), length_(length) {}
const char* data() const { return data_; }
size_t length() const { return length_; }
private:
const char* data_;
size_t length_;
};
/** /**
* Ignore * Ignore
*/ */
class V8EXPORT Extension { // NOLINT class V8EXPORT Extension { // NOLINT
public: public:
// Note that the strings passed into this constructor must live as long
// as the Extension itself.
Extension(const char* name, Extension(const char* name,
const char* source = 0, const char* source = 0,
int dep_count = 0, int dep_count = 0,
const char** deps = 0, const char** deps = 0);
int source_length = -1);
virtual ~Extension() { } virtual ~Extension() { }
virtual v8::Handle<v8::FunctionTemplate> virtual v8::Handle<v8::FunctionTemplate>
GetNativeFunction(v8::Handle<v8::String> name) { GetNativeFunction(v8::Handle<v8::String> name) {
return v8::Handle<v8::FunctionTemplate>(); return v8::Handle<v8::FunctionTemplate>();
} }
const char* name() const { return name_; } const char* name() { return name_; }
size_t source_length() const { return source_length_; } const char* source() { return source_; }
const String::ExternalAsciiStringResource* source() const {
return &source_; }
int dependency_count() { return dep_count_; } int dependency_count() { return dep_count_; }
const char** dependencies() { return deps_; } const char** dependencies() { return deps_; }
void set_auto_enable(bool value) { auto_enable_ = value; } void set_auto_enable(bool value) { auto_enable_ = value; }
@ -2495,8 +2476,7 @@ class V8EXPORT Extension { // NOLINT
private: private:
const char* name_; const char* name_;
size_t source_length_; // expected to initialize before source_ const char* source_;
ExternalAsciiStringResourceImpl source_;
int dep_count_; int dep_count_;
const char** deps_; const char** deps_;
bool auto_enable_; bool auto_enable_;
@ -3518,9 +3498,9 @@ class V8EXPORT Context {
* *
* v8::Locker is a scoped lock object. While it's * v8::Locker is a scoped lock object. While it's
* active (i.e. between its construction and destruction) the current thread is * active (i.e. between its construction and destruction) the current thread is
* allowed to use the locked isolate. V8 guarantees that an isolate can be * allowed to use the locked isolate. V8 guarantees that an isolate can be locked
* locked by at most one thread at any time. In other words, the scope of a * by at most one thread at any time. In other words, the scope of a v8::Locker is
* v8::Locker is a critical section. * a critical section.
* *
* Sample usage: * Sample usage:
* \code * \code
@ -3622,8 +3602,8 @@ class V8EXPORT Locker {
static void StopPreemption(); static void StopPreemption();
/** /**
* Returns whether or not the locker for a given isolate, or default isolate * Returns whether or not the locker for a given isolate, or default isolate if NULL is given,
* if NULL is given, is locked by the current thread. * is locked by the current thread.
*/ */
static bool IsLocked(Isolate* isolate = NULL); static bool IsLocked(Isolate* isolate = NULL);
@ -3789,7 +3769,7 @@ class Internals {
static const int kFullStringRepresentationMask = 0x07; static const int kFullStringRepresentationMask = 0x07;
static const int kExternalTwoByteRepresentationTag = 0x02; static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kJSObjectType = 0xa6; static const int kJSObjectType = 0xa3;
static const int kFirstNonstringType = 0x80; static const int kFirstNonstringType = 0x80;
static const int kForeignType = 0x85; static const int kForeignType = 0x85;

deps/v8/src/SConscript (2)

@ -84,7 +84,6 @@ SOURCES = {
hydrogen.cc hydrogen.cc
hydrogen-instructions.cc hydrogen-instructions.cc
ic.cc ic.cc
incremental-marking.cc
inspector.cc inspector.cc
interpreter-irregexp.cc interpreter-irregexp.cc
isolate.cc isolate.cc
@ -134,7 +133,6 @@ SOURCES = {
v8utils.cc v8utils.cc
variables.cc variables.cc
version.cc version.cc
store-buffer.cc
zone.cc zone.cc
extensions/gc-extension.cc extensions/gc-extension.cc
extensions/externalize-string-extension.cc extensions/externalize-string-extension.cc

deps/v8/src/api.cc (130)

@ -185,10 +185,7 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
int end_marker; int end_marker;
heap_stats.end_marker = &end_marker; heap_stats.end_marker = &end_marker;
i::Isolate* isolate = i::Isolate::Current(); i::Isolate* isolate = i::Isolate::Current();
// BUG(1718): isolate->heap()->RecordStats(&heap_stats, take_snapshot);
// Don't use the take_snapshot since we don't support HeapIterator here
// without doing a special GC.
isolate->heap()->RecordStats(&heap_stats, false);
i::V8::SetFatalError(); i::V8::SetFatalError();
FatalErrorCallback callback = GetFatalErrorHandler(); FatalErrorCallback callback = GetFatalErrorHandler();
{ {
@ -504,12 +501,9 @@ void RegisterExtension(Extension* that) {
Extension::Extension(const char* name, Extension::Extension(const char* name,
const char* source, const char* source,
int dep_count, int dep_count,
const char** deps, const char** deps)
int source_length)
: name_(name), : name_(name),
source_length_(source_length >= 0 ? source_(source),
source_length : (source ? strlen(source) : 0)),
source_(source, source_length_),
dep_count_(dep_count), dep_count_(dep_count),
deps_(deps), deps_(deps),
auto_enable_(false) { } auto_enable_(false) { }
@ -1787,7 +1781,7 @@ v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
static i::Handle<i::Object> CallV8HeapFunction(const char* name, static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> recv, i::Handle<i::Object> recv,
int argc, int argc,
i::Handle<i::Object> argv[], i::Object** argv[],
bool* has_pending_exception) { bool* has_pending_exception) {
i::Isolate* isolate = i::Isolate::Current(); i::Isolate* isolate = i::Isolate::Current();
i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name); i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name);
@ -1804,10 +1798,10 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name,
static i::Handle<i::Object> CallV8HeapFunction(const char* name, static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> data, i::Handle<i::Object> data,
bool* has_pending_exception) { bool* has_pending_exception) {
i::Handle<i::Object> argv[] = { data }; i::Object** argv[1] = { data.location() };
return CallV8HeapFunction(name, return CallV8HeapFunction(name,
i::Isolate::Current()->js_builtins_object(), i::Isolate::Current()->js_builtins_object(),
ARRAY_SIZE(argv), 1,
argv, argv,
has_pending_exception); has_pending_exception);
} }
@ -2627,11 +2621,10 @@ bool Value::Equals(Handle<Value> that) const {
if (obj->IsJSObject() && other->IsJSObject()) { if (obj->IsJSObject() && other->IsJSObject()) {
return *obj == *other; return *obj == *other;
} }
i::Handle<i::Object> args[] = { other }; i::Object** args[1] = { other.location() };
EXCEPTION_PREAMBLE(isolate); EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result = i::Handle<i::Object> result =
CallV8HeapFunction("EQUALS", obj, ARRAY_SIZE(args), args, CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception);
&has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, false); EXCEPTION_BAILOUT_CHECK(isolate, false);
return *result == i::Smi::FromInt(i::EQUAL); return *result == i::Smi::FromInt(i::EQUAL);
} }
@ -3211,10 +3204,21 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
ENTER_V8(isolate); ENTER_V8(isolate);
i::HandleScope scope(isolate); i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this); i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key); i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
self,
i::JSObject::ALLOW_CREATION));
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value); i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::Handle<i::Object> result = i::SetHiddenProperty(self, key_obj, value_obj); EXCEPTION_PREAMBLE(isolate);
return *result == *self; i::Handle<i::Object> obj = i::SetProperty(
hidden_props,
key_obj,
value_obj,
static_cast<PropertyAttributes>(None),
i::kNonStrictMode);
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return true;
} }
@ -3224,9 +3228,20 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
return Local<v8::Value>()); return Local<v8::Value>());
ENTER_V8(isolate); ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this); i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
self,
i::JSObject::OMIT_CREATION));
if (hidden_props->IsUndefined()) {
return v8::Local<v8::Value>();
}
i::Handle<i::String> key_obj = Utils::OpenHandle(*key); i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> result(self->GetHiddenProperty(*key_obj)); EXCEPTION_PREAMBLE(isolate);
if (result->IsUndefined()) return v8::Local<v8::Value>(); i::Handle<i::Object> result = i::GetProperty(hidden_props, key_obj);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, v8::Local<v8::Value>());
if (result->IsUndefined()) {
return v8::Local<v8::Value>();
}
return Utils::ToLocal(result); return Utils::ToLocal(result);
} }
@ -3237,9 +3252,15 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
ENTER_V8(isolate); ENTER_V8(isolate);
i::HandleScope scope(isolate); i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this); i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
self,
i::JSObject::OMIT_CREATION));
if (hidden_props->IsUndefined()) {
return true;
}
i::Handle<i::JSObject> js_obj(i::JSObject::cast(*hidden_props));
i::Handle<i::String> key_obj = Utils::OpenHandle(*key); i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
self->DeleteHiddenProperty(*key_obj); return i::DeleteProperty(js_obj, key_obj)->IsTrue();
return true;
} }
@ -3289,12 +3310,22 @@ void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
i::Handle<i::ExternalArray> array = i::Handle<i::ExternalArray> array =
isolate->factory()->NewExternalArray(length, array_type, data); isolate->factory()->NewExternalArray(length, array_type, data);
i::Handle<i::Map> external_array_map = // If the object already has external elements, create a new, unique
isolate->factory()->GetElementsTransitionMap( // map if the element type is now changing, because assumptions about
object, // generated code based on the receiver's map will be invalid.
GetElementsKindFromExternalArrayType(array_type)); i::Handle<i::HeapObject> elements(object->elements());
bool cant_reuse_map =
object->set_map(*external_array_map); elements->map()->IsUndefined() ||
!elements->map()->has_external_array_elements() ||
elements->map() != isolate->heap()->MapForExternalArrayType(array_type);
if (cant_reuse_map) {
i::Handle<i::Map> external_array_map =
isolate->factory()->GetElementsTransitionMap(
i::Handle<i::Map>(object->map()),
GetElementsKindFromExternalArrayType(array_type),
object->HasFastProperties());
object->set_map(*external_array_map);
}
object->set_elements(*array); object->set_elements(*array);
} }
@ -3453,8 +3484,7 @@ bool v8::Object::IsCallable() {
} }
Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc,
int argc,
v8::Handle<v8::Value> argv[]) { v8::Handle<v8::Value> argv[]) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::CallAsFunction()", ON_BAILOUT(isolate, "v8::Object::CallAsFunction()",
@ -3465,7 +3495,7 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
i::Handle<i::JSObject> obj = Utils::OpenHandle(this); i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv); i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**)); STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv); i::Object*** args = reinterpret_cast<i::Object***>(argv);
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>(); i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>();
if (obj->IsJSFunction()) { if (obj->IsJSFunction()) {
fun = i::Handle<i::JSFunction>::cast(obj); fun = i::Handle<i::JSFunction>::cast(obj);
@ -3495,7 +3525,7 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
i::HandleScope scope(isolate); i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this); i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**)); STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv); i::Object*** args = reinterpret_cast<i::Object***>(argv);
if (obj->IsJSFunction()) { if (obj->IsJSFunction()) {
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj); i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
EXCEPTION_PREAMBLE(isolate); EXCEPTION_PREAMBLE(isolate);
@ -3537,7 +3567,7 @@ Local<v8::Object> Function::NewInstance(int argc,
HandleScope scope; HandleScope scope;
i::Handle<i::JSFunction> function = Utils::OpenHandle(this); i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**)); STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv); i::Object*** args = reinterpret_cast<i::Object***>(argv);
EXCEPTION_PREAMBLE(isolate); EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned = i::Handle<i::Object> returned =
i::Execution::New(function, argc, args, &has_pending_exception); i::Execution::New(function, argc, args, &has_pending_exception);
@ -3558,7 +3588,7 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
i::Handle<i::JSFunction> fun = Utils::OpenHandle(this); i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv); i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**)); STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv); i::Object*** args = reinterpret_cast<i::Object***>(argv);
EXCEPTION_PREAMBLE(isolate); EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> returned = i::Handle<i::Object> returned =
i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception); i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
@ -3769,11 +3799,10 @@ bool v8::String::IsExternalAscii() const {
void v8::String::VerifyExternalStringResource( void v8::String::VerifyExternalStringResource(
v8::String::ExternalStringResource* value) const { v8::String::ExternalStringResource* value) const {
i::Handle<i::String> str = Utils::OpenHandle(this); i::Handle<i::String> str = Utils::OpenHandle(this);
const v8::String::ExternalStringResource* expected; v8::String::ExternalStringResource* expected;
if (i::StringShape(*str).IsExternalTwoByte()) { if (i::StringShape(*str).IsExternalTwoByte()) {
const void* resource = void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
i::Handle<i::ExternalTwoByteString>::cast(str)->resource(); expected = reinterpret_cast<ExternalStringResource*>(resource);
expected = reinterpret_cast<const ExternalStringResource*>(resource);
} else { } else {
expected = NULL; expected = NULL;
} }
@ -3781,7 +3810,7 @@ void v8::String::VerifyExternalStringResource(
} }
const v8::String::ExternalAsciiStringResource* v8::String::ExternalAsciiStringResource*
v8::String::GetExternalAsciiStringResource() const { v8::String::GetExternalAsciiStringResource() const {
i::Handle<i::String> str = Utils::OpenHandle(this); i::Handle<i::String> str = Utils::OpenHandle(this);
if (IsDeadCheck(str->GetIsolate(), if (IsDeadCheck(str->GetIsolate(),
@ -3789,9 +3818,8 @@ const v8::String::ExternalAsciiStringResource*
return NULL; return NULL;
} }
if (i::StringShape(*str).IsExternalAscii()) { if (i::StringShape(*str).IsExternalAscii()) {
const void* resource = void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
i::Handle<i::ExternalAsciiString>::cast(str)->resource(); return reinterpret_cast<ExternalAsciiStringResource*>(resource);
return reinterpret_cast<const ExternalAsciiStringResource*>(resource);
} else { } else {
return NULL; return NULL;
} }
@ -3981,7 +4009,7 @@ bool v8::V8::IdleNotification() {
void v8::V8::LowMemoryNotification() { void v8::V8::LowMemoryNotification() {
i::Isolate* isolate = i::Isolate::Current(); i::Isolate* isolate = i::Isolate::Current();
if (!isolate->IsInitialized()) return; if (!isolate->IsInitialized()) return;
isolate->heap()->CollectAllAvailableGarbage(); isolate->heap()->CollectAllGarbage(true);
} }
@ -5452,12 +5480,6 @@ bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) {
wait_for_connection); wait_for_connection);
} }
void Debug::DisableAgent() {
return i::Isolate::Current()->debugger()->StopAgent();
}
void Debug::ProcessDebugMessages() { void Debug::ProcessDebugMessages() {
i::Execution::ProcessDebugMesssages(true); i::Execution::ProcessDebugMesssages(true);
} }
@ -5782,16 +5804,6 @@ const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
} }
v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue");
i::Handle<i::HeapObject> object = ToInternal(this)->GetHeapObject();
return v8::Handle<Value>(!object.is_null() ?
ToApi<Value>(object) : ToApi<Value>(
isolate->factory()->undefined_value()));
}
static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) { static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
return const_cast<i::HeapSnapshot*>( return const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(snapshot)); reinterpret_cast<const i::HeapSnapshot*>(snapshot));

deps/v8/src/arm/assembler-arm-inl.h (24)

@ -77,11 +77,6 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target) { void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target); Assembler::set_target_address_at(pc_, target);
if (host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
} }
@ -106,10 +101,6 @@ Object** RelocInfo::target_object_address() {
void RelocInfo::set_target_object(Object* target) { void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target)); Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
if (host() != NULL && target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), &Memory::Object_at(pc_), HeapObject::cast(target));
}
} }
@ -140,12 +131,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset; Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address; Memory::Address_at(pc_) = address;
if (host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), NULL, cell);
}
} }
@ -162,11 +147,6 @@ void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target; Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
} }
@@ -215,7 +195,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitEmbeddedPointer(host(), target_object_address());
+    visitor->VisitPointer(target_object_address());
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
@@ -241,7 +221,7 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitEmbeddedPointer(heap, host(), target_object_address());
+    StaticVisitor::VisitPointer(heap, target_object_address());
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
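Note: the hunks above strip the incremental-marking notifications that the 3.6.6 RelocInfo setters performed after each patch. As a rough, self-contained C++ sketch of that write-barrier pattern (write the slot, then tell the marker about the new reference); the Heap, IncrementalMarking and HostObject types below are illustrative stand-ins, not the real V8 classes:

#include <cstdio>

// Stand-in for the collector component that tracks in-progress marking.
struct IncrementalMarking {
  bool active = false;
  void RecordWrite(void* host, void* slot, void* value) {
    if (active) {
      std::printf("record write: host=%p slot=%p value=%p\n", host, slot, value);
    }
  }
};

struct Heap {
  IncrementalMarking marking;
  IncrementalMarking* incremental_marking() { return &marking; }
};

// Stand-in for a code object that owns relocation entries.
struct HostObject {
  Heap* heap;
  Heap* GetHeap() { return heap; }
};

// Mirrors the shape of RelocInfo::set_target_object: write the slot first,
// then notify the write barrier about the new reference.
void SetTargetObject(HostObject* host, void** slot, void* target) {
  *slot = target;
  if (host != nullptr) {
    host->GetHeap()->incremental_marking()->RecordWrite(host, slot, target);
  }
}

int main() {
  Heap heap;
  heap.marking.active = true;
  HostObject code{&heap};
  void* slot = nullptr;
  int dummy_target = 42;
  SetTargetObject(&code, &slot, &dummy_target);
  return 0;
}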

12
deps/v8/src/arm/assembler-arm.cc

@@ -78,9 +78,7 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
void CpuFeatures::Probe() {
-  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
-                                CpuFeaturesImpliedByCompiler());
-  ASSERT(supported_ == 0 || supported_ == standard_features);
+  ASSERT(!initialized_);
#ifdef DEBUG
  initialized_ = true;
#endif
@@ -88,7 +86,8 @@ void CpuFeatures::Probe() {
  // Get the features implied by the OS and the compiler settings. This is the
  // minimal set of features which is also alowed for generated code in the
  // snapshot.
-  supported_ |= standard_features;
+  supported_ |= OS::CpuFeaturesImpliedByPlatform();
+  supported_ |= CpuFeaturesImpliedByCompiler();
  if (Serializer::enabled()) {
    // No probing for features if we might serialize (generate snapshot).
@@ -2506,8 +2505,7 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  // We do not try to reuse pool constants.
-  RelocInfo rinfo(pc_, rmode, data, NULL);
+  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2539,7 +2537,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  }
  ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
-    RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
+    RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
    ClearRecordedAstId();
    reloc_info_writer.Write(&reloc_info_with_ast_id);
  } else {
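The Probe() hunk above reverts to OR-ing the platform-implied and compiler-implied feature sets straight into supported_. A minimal, self-contained sketch of that bitmask accumulation, using made-up feature flags rather than V8's real CpuFeature enumerators:

#include <cstdint>
#include <cstdio>

// Hypothetical feature bits; the real code uses an enum of CpuFeature values.
constexpr uint64_t kVFP3  = 1u << 0;
constexpr uint64_t kARMv7 = 1u << 1;

uint64_t CpuFeaturesImpliedByPlatform() { return kVFP3; }   // e.g. from OS probing
uint64_t CpuFeaturesImpliedByCompiler() { return kARMv7; }  // e.g. from -march flags

int main() {
  uint64_t supported = 0;
  // The same accumulation the reverted Probe() performs before any runtime probing.
  supported |= CpuFeaturesImpliedByPlatform();
  supported |= CpuFeaturesImpliedByCompiler();
  std::printf("VFP3: %d, ARMv7: %d\n",
              (supported & kVFP3) != 0, (supported & kARMv7) != 0);
  return 0;
}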

10
deps/v8/src/arm/assembler-arm.h

@@ -1209,10 +1209,6 @@ class Assembler : public AssemblerBase {
  PositionsRecorder* positions_recorder() { return &positions_recorder_; }

-  // Read/patch instructions
-  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
-  void instr_at_put(int pos, Instr instr) {
-    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
-  }
  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(byte* pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
@@ -1267,6 +1263,12 @@ class Assembler : public AssemblerBase {
  int buffer_space() const { return reloc_info_writer.pos() - pc_; }

+  // Read/patch instructions
+  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+  void instr_at_put(int pos, Instr instr) {
+    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+  }
+
  // Decode branch instruction at pos and return branch target pos
  int target_at(int pos);

1118
deps/v8/src/arm/builtins-arm.cc

File diff suppressed because it is too large

585
deps/v8/src/arm/code-stubs-arm.cc

@@ -189,72 +189,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
}
void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
// [sp]: function.
// [sp + kPointerSize]: serialized scope info
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
__ AllocateInNewSpace(FixedArray::SizeFor(length),
r0, r1, r2, &gc, TAG_OBJECT);
// Load the function from the stack.
__ ldr(r3, MemOperand(sp, 0));
// Load the serialized scope info from the stack.
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
// Setup the object header.
__ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
// If this block context is nested in the global context we get a smi
// sentinel instead of a function. The block context should get the
// canonical empty function of the global context as its closure which
// we still have to look up.
Label after_sentinel;
__ JumpIfNotSmi(r3, &after_sentinel);
if (FLAG_debug_code) {
const char* message = "Expected 0 as a Smi sentinel";
__ cmp(r3, Operand::Zero());
__ Assert(eq, message);
}
__ ldr(r3, GlobalObjectOperand());
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
__ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
// Setup the fixed slots.
__ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
__ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
__ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
// Copy the global object from the previous context.
__ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
__ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));
// Initialize the rest of the slots to the hole value.
__ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
for (int i = 0; i < slots_; i++) {
__ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
}
// Remove the on-stack argument and return.
__ mov(cp, r0);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
// Need to collect. Call into runtime system.
__ bind(&gc);
__ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
@@ -904,11 +838,9 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
    __ vmov(d0, r0, r1);
    __ vmov(d1, r2, r3);
  }
-  {
-    AllowExternalCallThatCantCauseGC scope(masm);
-    __ CallCFunction(
-        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
-  }
+  // Call C routine that may not cause GC or other trouble.
+  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
+                   0, 2);
  // Store answer in the overwritable heap number. Double returned in
  // registers r0 and r1 or in d0.
  if (masm->use_eabi_hardfloat()) {
@ -925,29 +857,6 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
} }
bool WriteInt32ToHeapNumberStub::IsPregenerated() {
// These variants are compiled ahead of time. See next method.
if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
return true;
}
if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
return true;
}
// Other register combinations are generated as and when they are needed,
// so it is unsafe to call them from stubs (we can't generate a stub while
// we are generating a stub).
return false;
}
void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
stub1.GetCode()->set_is_pregenerated(true);
stub2.GetCode()->set_is_pregenerated(true);
}
// See comment for class. // See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int; Label max_negative_int;
@@ -1288,8 +1197,6 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
    __ vmov(d0, r0, r1);
    __ vmov(d1, r2, r3);
  }
-  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
                   0, 2);
  __ pop(pc);  // Return.
@@ -1307,7 +1214,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
-  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r2 and compare it with
  // FIRST_SPEC_OBJECT_TYPE.
@@ -1699,8 +1606,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
-  // This stub overrides SometimesSetsUpAFrame() to return false. That means
-  // we cannot call anything that could cause a GC from this stub.
  // This stub uses VFP3 instructions.
  CpuFeatures::Scope scope(VFP3);
@ -1808,41 +1713,6 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
} }
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
__ stm(db_w, sp, kCallerSaved | lr.bit());
if (save_doubles_ == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
__ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
__ vstr(reg, MemOperand(sp, i * kDoubleSize));
}
}
const int argument_count = 1;
const int fp_argument_count = 0;
const Register scratch = r1;
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
__ mov(r0, Operand(ExternalReference::isolate_address()));
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
__ vldr(reg, MemOperand(sp, i * kDoubleSize));
}
__ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
}
__ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
}
void UnaryOpStub::PrintName(StringStream* stream) { void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_); const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy. const char* overwrite_name = NULL; // Make g++ happy.
@@ -1996,13 +1866,12 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
  __ jmp(&heapnumber_allocated);

  __ bind(&slow_allocate_heapnumber);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(r0);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(r1, Operand(r0));
-    __ pop(r0);
-  }
+  __ EnterInternalFrame();
+  __ push(r0);
+  __ CallRuntime(Runtime::kNumberAlloc, 0);
+  __ mov(r1, Operand(r0));
+  __ pop(r0);
+  __ LeaveInternalFrame();

  __ bind(&heapnumber_allocated);
  __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
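Several hunks in this file replace the 3.6.6 FrameScope helper with explicit EnterInternalFrame()/LeaveInternalFrame() calls. The two spellings set up the same frame; the RAII form just ties the teardown to scope exit. A self-contained sketch of that idea, with MacroAssembler as a stub rather than V8's real class:

#include <cstdio>

struct MacroAssembler {
  void EnterInternalFrame() { std::printf("enter internal frame\n"); }
  void LeaveInternalFrame() { std::printf("leave internal frame\n"); }
};

// RAII wrapper: the destructor guarantees the frame is torn down on every
// path out of the scope, which is what a scope object buys over manual calls.
class FrameScope {
 public:
  explicit FrameScope(MacroAssembler* masm) : masm_(masm) {
    masm_->EnterInternalFrame();
  }
  ~FrameScope() { masm_->LeaveInternalFrame(); }

 private:
  MacroAssembler* masm_;
};

int main() {
  MacroAssembler masm;
  {
    FrameScope scope(&masm);            // scoped style
    std::printf("  ...emit code inside the frame...\n");
  }                                      // frame left automatically here

  masm.EnterInternalFrame();             // reverted style: explicit pairing
  std::printf("  ...emit code inside the frame...\n");
  masm.LeaveInternalFrame();
  return 0;
}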
@@ -2043,14 +1912,13 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
  __ jmp(&heapnumber_allocated);

  __ bind(&slow_allocate_heapnumber);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(r0);  // Push the heap number, not the untagged int32.
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(r2, r0);  // Move the new heap number into r2.
-    // Get the heap number into r0, now that the new heap number is in r2.
-    __ pop(r0);
-  }
+  __ EnterInternalFrame();
+  __ push(r0);  // Push the heap number, not the untagged int32.
+  __ CallRuntime(Runtime::kNumberAlloc, 0);
+  __ mov(r2, r0);  // Move the new heap number into r2.
+  // Get the heap number into r0, now that the new heap number is in r2.
+  __ pop(r0);
+  __ LeaveInternalFrame();

  // Convert the heap number in r0 to an untagged integer in r1.
  // This can't go slow-case because it's the same number we already
@@ -2160,10 +2028,6 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
void BinaryOpStub::Generate(MacroAssembler* masm) {
-  // Explicitly allow generation of nested stubs. It is safe here because
-  // generation code does not use any raw pointers.
-  AllowStubCallsScope allow_stub_calls(masm, true);
-
  switch (operands_type_) {
    case BinaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
@ -3269,11 +3133,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache); __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
__ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
{ __ EnterInternalFrame();
FrameScope scope(masm, StackFrame::INTERNAL); __ push(r0);
__ push(r0); __ CallRuntime(RuntimeFunction(), 1);
__ CallRuntime(RuntimeFunction(), 1); __ LeaveInternalFrame();
}
__ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ Ret(); __ Ret();
@ -3286,15 +3149,14 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// We return the value in d2 without adding it to the cache, but // We return the value in d2 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed. // we cause a scavenging GC so that future allocations will succeed.
{ __ EnterInternalFrame();
FrameScope scope(masm, StackFrame::INTERNAL);
// Allocate an aligned object larger than a HeapNumber.
// Allocate an aligned object larger than a HeapNumber. ASSERT(4 * kPointerSize >= HeapNumber::kSize);
ASSERT(4 * kPointerSize >= HeapNumber::kSize); __ mov(scratch0, Operand(4 * kPointerSize));
__ mov(scratch0, Operand(4 * kPointerSize)); __ push(scratch0);
__ push(scratch0); __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
__ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); __ LeaveInternalFrame();
}
__ Ret(); __ Ret();
} }
} }
@@ -3311,7 +3173,6 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
  } else {
    __ vmov(r0, r1, d2);
  }
-  AllowExternalCallThatCantCauseGC scope(masm);
  switch (type_) {
    case TranscendentalCache::SIN:
      __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
@ -3407,14 +3268,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr); __ push(lr);
__ PrepareCallCFunction(1, 1, scratch); __ PrepareCallCFunction(1, 1, scratch);
__ SetCallCDoubleArguments(double_base, exponent); __ SetCallCDoubleArguments(double_base, exponent);
{ __ CallCFunction(
AllowExternalCallThatCantCauseGC scope(masm); ExternalReference::power_double_int_function(masm->isolate()),
__ CallCFunction( 1, 1);
ExternalReference::power_double_int_function(masm->isolate()), __ pop(lr);
1, 1); __ GetCFunctionDoubleResult(double_result);
__ pop(lr);
__ GetCFunctionDoubleResult(double_result);
}
__ vstr(double_result, __ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber); __ mov(r0, heapnumber);
@ -3440,14 +3298,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr); __ push(lr);
__ PrepareCallCFunction(0, 2, scratch); __ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent); __ SetCallCDoubleArguments(double_base, double_exponent);
{ __ CallCFunction(
AllowExternalCallThatCantCauseGC scope(masm); ExternalReference::power_double_double_function(masm->isolate()),
__ CallCFunction( 0, 2);
ExternalReference::power_double_double_function(masm->isolate()), __ pop(lr);
0, 2); __ GetCFunctionDoubleResult(double_result);
__ pop(lr);
__ GetCFunctionDoubleResult(double_result);
}
__ vstr(double_result, __ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
__ mov(r0, heapnumber); __ mov(r0, heapnumber);
@ -3464,37 +3319,6 @@ bool CEntryStub::NeedsImmovableCode() {
} }
bool CEntryStub::IsPregenerated() {
return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
result_size_ == 1;
}
void CodeStub::GenerateStubsAheadOfTime() {
CEntryStub::GenerateAheadOfTime();
WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
}
void CodeStub::GenerateFPStubs() {
CEntryStub save_doubles(1, kSaveFPRegs);
Handle<Code> code = save_doubles.GetCode();
code->set_is_pregenerated(true);
StoreBufferOverflowStub stub(kSaveFPRegs);
stub.GetCode()->set_is_pregenerated(true);
code->GetIsolate()->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime() {
CEntryStub stub(1, kDontSaveFPRegs);
Handle<Code> code = stub.GetCode();
code->set_is_pregenerated(true);
}
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ Throw(r0); __ Throw(r0);
} }
@@ -3606,7 +3430,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
  __ b(eq, throw_out_of_memory_exception);

  // Retrieve the pending exception and clear the variable.
-  __ mov(r3, Operand(isolate->factory()->the_hole_value()));
+  __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
+  __ ldr(r3, MemOperand(ip));
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate)));
  __ ldr(r0, MemOperand(ip));
@@ -3644,7 +3469,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
  __ sub(r6, r6, Operand(kPointerSize));

  // Enter the exit frame that transitions from JavaScript to C++.
-  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles_);

  // Setup argc and the builtin function in callee-saved registers.
@@ -3789,7 +3613,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
  // saved values before returning a failure to C.

  // Clear any pending exceptions.
-  __ mov(r5, Operand(isolate->factory()->the_hole_value()));
+  __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
+  __ ldr(r5, MemOperand(ip));
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate)));
  __ str(r5, MemOperand(ip));
@ -4026,11 +3851,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
} }
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else { } else {
{ __ EnterInternalFrame();
FrameScope scope(masm, StackFrame::INTERNAL); __ Push(r0, r1);
__ Push(r0, r1); __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); __ LeaveInternalFrame();
}
__ cmp(r0, Operand::Zero()); __ cmp(r0, Operand::Zero());
__ LoadRoot(r0, Heap::kTrueValueRootIndex, eq); __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(r0, Heap::kFalseValueRootIndex, ne); __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
@ -4656,7 +4480,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// For arguments 4 and 3 get string length, calculate start of string data and // For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte). // calculate the shift of the index (0 for ASCII and 1 for two byte).
__ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ eor(r3, r3, Operand(1)); __ eor(r3, r3, Operand(1));
// Load the length from the original subject string from the previous stack // Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer // frame. Therefore we have to use fp, which points exactly to two pointer
@ -4707,7 +4532,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but // stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system. // haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception. // TODO(592): Rerunning the RegExp to get the stack overflow exception.
__ mov(r1, Operand(isolate->factory()->the_hole_value())); __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
__ ldr(r1, MemOperand(r1, 0));
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate))); isolate)));
__ ldr(r0, MemOperand(r2, 0)); __ ldr(r0, MemOperand(r2, 0));
@ -4749,25 +4575,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ str(r2, FieldMemOperand(last_match_info_elements, __ str(r2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset)); RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input. // Store last subject and last input.
__ mov(r3, last_match_info_elements); // Moved up to reduce latency.
__ str(subject, __ str(subject,
FieldMemOperand(last_match_info_elements, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset)); RegExpImpl::kLastSubjectOffset));
__ mov(r2, subject); __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastSubjectOffset,
r2,
r7,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
__ str(subject, __ str(subject,
FieldMemOperand(last_match_info_elements, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset)); RegExpImpl::kLastInputOffset));
__ RecordWriteField(last_match_info_elements, __ mov(r3, last_match_info_elements);
RegExpImpl::kLastInputOffset, __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
subject,
r7,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code. // Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector = ExternalReference address_of_static_offsets_vector =
@ -4895,22 +4712,6 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
} }
void CallFunctionStub::FinishCode(Code* code) {
code->set_has_function_cache(false);
}
void CallFunctionStub::Clear(Heap* heap, Address address) {
UNREACHABLE();
}
Object* CallFunctionStub::GetCachedValue(Address address) {
UNREACHABLE();
return NULL;
}
void CallFunctionStub::Generate(MacroAssembler* masm) { void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow, non_function; Label slow, non_function;
@ -6624,13 +6425,12 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// Call the runtime system in a fresh internal frame. // Call the runtime system in a fresh internal frame.
ExternalReference miss = ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
{ __ EnterInternalFrame();
FrameScope scope(masm, StackFrame::INTERNAL); __ Push(r1, r0);
__ Push(r1, r0); __ mov(ip, Operand(Smi::FromInt(op_)));
__ mov(ip, Operand(Smi::FromInt(op_))); __ push(ip);
__ push(ip); __ CallExternalReference(miss, 3);
__ CallExternalReference(miss, 3); __ LeaveInternalFrame();
}
// Compute the entry point of the rewritten stub. // Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers. // Restore registers.
@@ -6813,8 +6613,6 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
-  // This stub overrides SometimesSetsUpAFrame() to return false. That means
-  // we cannot call anything that could cause a GC from this stub.
  // Registers:
  //  result: StringDictionary to probe
  //  r1: key
@@ -6904,267 +6702,6 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
struct AheadOfTimeWriteBarrierStubList {
Register object, value, address;
RememberedSetAction action;
};
struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
{ r6, r4, r7, EMIT_REMEMBERED_SET },
{ r6, r2, r7, EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
{ r3, r4, r5, EMIT_REMEMBERED_SET },
// Used in CompileStoreGlobal.
{ r4, r1, r2, OMIT_REMEMBERED_SET },
// Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
{ r1, r2, r3, EMIT_REMEMBERED_SET },
{ r3, r2, r1, EMIT_REMEMBERED_SET },
// Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
{ r2, r1, r3, EMIT_REMEMBERED_SET },
{ r3, r1, r2, EMIT_REMEMBERED_SET },
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ r4, r2, r3, EMIT_REMEMBERED_SET },
// Null termination.
{ no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
};
bool RecordWriteStub::IsPregenerated() {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
if (object_.is(entry->object) &&
value_.is(entry->value) &&
address_.is(entry->address) &&
remembered_set_action_ == entry->action &&
save_fp_regs_mode_ == kDontSaveFPRegs) {
return true;
}
}
return false;
}
bool StoreBufferOverflowStub::IsPregenerated() {
return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
}
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
stub1.GetCode()->set_is_pregenerated(true);
}
void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
RecordWriteStub stub(entry->object,
entry->value,
entry->address,
entry->action,
kDontSaveFPRegs);
stub.GetCode()->set_is_pregenerated(true);
}
}
// Takes the input in 3 registers: address_ value_ and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
// written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
Label skip_to_incremental_noncompacting;
Label skip_to_incremental_compacting;
// The first two instructions are generated with labels so as to get the
// offset fixed up correctly by the bind(Label*) call. We patch it back and
// forth between a compare instructions (a nop in this position) and the
// real branch when we start and stop incremental heap marking.
// See RecordWriteStub::Patch for details.
__ b(&skip_to_incremental_noncompacting);
__ b(&skip_to_incremental_compacting);
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
__ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
}
__ Ret();
__ bind(&skip_to_incremental_noncompacting);
GenerateIncremental(masm, INCREMENTAL);
__ bind(&skip_to_incremental_compacting);
GenerateIncremental(masm, INCREMENTAL_COMPACTION);
// Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
// Will be checked in IncrementalMarking::ActivateGeneratedStub.
ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
PatchBranchIntoNop(masm, 0);
PatchBranchIntoNop(masm, Assembler::kInstrSize);
}
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.Save(masm);
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
__ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
__ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
regs_.scratch0(),
&dont_need_remembered_set);
__ CheckPageFlag(regs_.object(),
regs_.scratch0(),
1 << MemoryChunk::SCAN_ON_SCAVENGE,
ne,
&dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm, mode);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
__ bind(&dont_need_remembered_set);
}
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm, mode);
regs_.Restore(masm);
__ Ret();
}
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
Register address =
r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
ASSERT(!address.is(regs_.object()));
ASSERT(!address.is(r0));
__ Move(address, regs_.address());
__ Move(r0, regs_.object());
if (mode == INCREMENTAL_COMPACTION) {
__ Move(r1, address);
} else {
ASSERT(mode == INCREMENTAL);
__ ldr(r1, MemOperand(address, 0));
}
__ mov(r2, Operand(ExternalReference::isolate_address()));
AllowExternalCallThatCantCauseGC scope(masm);
if (mode == INCREMENTAL_COMPACTION) {
__ CallCFunction(
ExternalReference::incremental_evacuation_record_write_function(
masm->isolate()),
argument_count);
} else {
ASSERT(mode == INCREMENTAL);
__ CallCFunction(
ExternalReference::incremental_marking_record_write_function(
masm->isolate()),
argument_count);
}
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
Label on_black;
Label need_incremental;
Label need_incremental_pop_scratch;
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
__ bind(&on_black);
// Get the value from the slot.
__ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
__ CheckPageFlag(regs_.scratch0(), // Contains value.
regs_.scratch1(), // Scratch.
MemoryChunk::kEvacuationCandidateMask,
eq,
&ensure_not_white);
__ CheckPageFlag(regs_.object(),
regs_.scratch1(), // Scratch.
MemoryChunk::kSkipEvacuationSlotsRecordingMask,
eq,
&need_incremental);
__ bind(&ensure_not_white);
}
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.object(), regs_.address());
__ EnsureNotWhite(regs_.scratch0(), // The value.
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
regs_.address(), // Scratch.
&need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(object_,
address_,
value_,
save_fp_regs_mode_,
MacroAssembler::kReturnAtEnd);
} else {
__ Ret();
}
__ bind(&need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
__ bind(&need_incremental);
// Fall through when we need to inform the incremental marker.
}
#undef __

} }  // namespace v8::internal

245
deps/v8/src/arm/code-stubs-arm.h

@ -58,25 +58,6 @@ class TranscendentalCacheStub: public CodeStub {
}; };
class StoreBufferOverflowStub: public CodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
private:
SaveFPRegsMode save_doubles_;
Major MajorKey() { return StoreBufferOverflow; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
class UnaryOpStub: public CodeStub { class UnaryOpStub: public CodeStub {
public: public:
UnaryOpStub(Token::Value op, UnaryOpStub(Token::Value op,
@@ -342,9 +323,6 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
        the_heap_number_(the_heap_number),
        scratch_(scratch) { }

-  bool IsPregenerated();
-  static void GenerateFixedRegStubsAheadOfTime();
-
 private:
  Register the_int_;
  Register the_heap_number_;
@@ -393,225 +371,6 @@ class NumberToStringStub: public CodeStub {
};
class RecordWriteStub: public CodeStub {
public:
RecordWriteStub(Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
}
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
INCREMENTAL_COMPACTION
};
virtual bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
}
static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
}
static Mode GetMode(Code* stub) {
Instr first_instruction = Assembler::instr_at(stub->instruction_start());
Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
Assembler::kInstrSize);
if (Assembler::IsBranch(first_instruction)) {
return INCREMENTAL;
}
ASSERT(Assembler::IsTstImmediate(first_instruction));
if (Assembler::IsBranch(second_instruction)) {
return INCREMENTAL_COMPACTION;
}
ASSERT(Assembler::IsTstImmediate(second_instruction));
return STORE_BUFFER_ONLY;
}
static void Patch(Code* stub, Mode mode) {
MacroAssembler masm(NULL,
stub->instruction_start(),
stub->instruction_size());
switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
PatchBranchIntoNop(&masm, 0);
PatchBranchIntoNop(&masm, Assembler::kInstrSize);
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, 0);
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, Assembler::kInstrSize);
break;
}
ASSERT(GetMode(stub) == mode);
CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
}
private:
// This is a helper class for freeing up 3 scratch registers. The input is
// two registers that must be preserved and one scratch register provided by
// the caller.
class RegisterAllocation {
public:
RegisterAllocation(Register object,
Register address,
Register scratch0)
: object_(object),
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
masm->push(scratch1_);
}
void Restore(MacroAssembler* masm) {
masm->pop(scratch1_);
}
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved. The scratch registers
// will be restored by other means so we don't bother pushing them here.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
masm->sub(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
// Save all VFP registers except d0.
for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
}
}
}
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP3);
// Restore all VFP registers except d0.
for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
}
masm->add(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
}
masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
}
inline Register object() { return object_; }
inline Register address() { return address_; }
inline Register scratch0() { return scratch0_; }
inline Register scratch1() { return scratch1_; }
private:
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
Register GetRegThatIsNotOneOf(Register r1,
Register r2,
Register r3) {
for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
if (candidate.is(r3)) continue;
return candidate;
}
UNREACHABLE();
return no_reg;
}
friend class RecordWriteStub;
};
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
void Generate(MacroAssembler* masm);
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
Major MajorKey() { return RecordWrite; }
int MinorKey() {
return ObjectBits::encode(object_.code()) |
ValueBits::encode(value_.code()) |
AddressBits::encode(address_.code()) |
RememberedSetActionBits::encode(remembered_set_action_) |
SaveFPRegsModeBits::encode(save_fp_regs_mode_);
}
bool MustBeInStubCache() {
// All stubs must be registered in the stub cache
// otherwise IncrementalMarker would not be able to find
// and patch it.
return true;
}
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
class ObjectBits: public BitField<int, 0, 4> {};
class ValueBits: public BitField<int, 4, 4> {};
class AddressBits: public BitField<int, 8, 4> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
Register object_;
Register value_;
Register address_;
RememberedSetAction remembered_set_action_;
SaveFPRegsMode save_fp_regs_mode_;
Label slow_;
RegisterAllocation regs_;
};
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
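The removed RecordWriteStub above encodes its register assignments and flags into MinorKey() through BitField helpers (4 bits each for the object, value and address registers, one bit each for the remembered-set action and FP mode). A hand-rolled sketch of that packing scheme, purely illustrative and not the real BitField template:

#include <cassert>
#include <cstdint>

// Pack three 4-bit register codes and two 1-bit flags into one key,
// mirroring the ObjectBits / ValueBits / AddressBits layout shown above.
constexpr uint32_t EncodeKey(uint32_t object, uint32_t value, uint32_t address,
                             bool emit_remembered_set, bool save_fp) {
  return (object & 0xF) |
         ((value & 0xF) << 4) |
         ((address & 0xF) << 8) |
         (static_cast<uint32_t>(emit_remembered_set) << 12) |
         (static_cast<uint32_t>(save_fp) << 13);
}

int main() {
  uint32_t key = EncodeKey(/*object=*/3, /*value=*/4, /*address=*/5,
                           /*emit_remembered_set=*/true, /*save_fp=*/false);
  assert((key & 0xF) == 3);          // object register code
  assert(((key >> 4) & 0xF) == 4);   // value register code
  assert(((key >> 8) & 0xF) == 5);   // address register code
  assert(((key >> 12) & 0x1) == 1);  // remembered-set action
  assert(((key >> 13) & 0x1) == 0);  // fp save mode
  return 0;
}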
@@ -816,8 +575,6 @@ class StringDictionaryLookupStub: public CodeStub {
                                     Register r0,
                                     Register r1);

-  virtual bool SometimesSetsUpAFrame() { return false; }
-
 private:
  static const int kInlinedProbes = 4;
  static const int kTotalProbes = 20;
@@ -830,7 +587,7 @@ class StringDictionaryLookupStub: public CodeStub {
      StringDictionary::kHeaderSize +
      StringDictionary::kElementsStartIndex * kPointerSize;

-  Major MajorKey() { return StringDictionaryLookup; }
+  Major MajorKey() { return StringDictionaryNegativeLookup; }

  int MinorKey() {
    return LookupModeBits::encode(mode_);

8
deps/v8/src/arm/codegen-arm.cc

@@ -38,16 +38,12 @@ namespace internal {
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterFrame(StackFrame::INTERNAL);
-  ASSERT(!masm->has_frame());
-  masm->set_has_frame(true);
+  masm->EnterInternalFrame();
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveFrame(StackFrame::INTERNAL);
-  ASSERT(masm->has_frame());
-  masm->set_has_frame(false);
+  masm->LeaveInternalFrame();
}

10
deps/v8/src/arm/codegen-arm.h

@ -69,6 +69,16 @@ class CodeGenerator: public AstVisitor {
int pos, int pos,
bool right_here = false); bool right_here = false);
// Constants related to patching of inlined load/store.
static int GetInlinedKeyedLoadInstructionsAfterPatch() {
return FLAG_debug_code ? 32 : 13;
}
static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
static int GetInlinedNamedStoreInstructionsAfterPatch() {
ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
return Isolate::Current()->inlined_write_barrier_size() + 4;
}
private: private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator); DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
}; };

82
deps/v8/src/arm/debug-arm.cc

@ -132,58 +132,56 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm, static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs, RegList object_regs,
RegList non_object_regs) { RegList non_object_regs) {
{ __ EnterInternalFrame();
FrameScope scope(masm, StackFrame::INTERNAL);
// Store the registers containing live values on the expression stack to
// Store the registers containing live values on the expression stack to // make sure that these are correctly updated during GC. Non object values
// make sure that these are correctly updated during GC. Non object values // are stored as a smi causing it to be untouched by GC.
// are stored as a smi causing it to be untouched by GC. ASSERT((object_regs & ~kJSCallerSaved) == 0);
ASSERT((object_regs & ~kJSCallerSaved) == 0); ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
ASSERT((non_object_regs & ~kJSCallerSaved) == 0); ASSERT((object_regs & non_object_regs) == 0);
ASSERT((object_regs & non_object_regs) == 0); if ((object_regs | non_object_regs) != 0) {
if ((object_regs | non_object_regs) != 0) { for (int i = 0; i < kNumJSCallerSaved; i++) {
for (int i = 0; i < kNumJSCallerSaved; i++) { int r = JSCallerSavedCode(i);
int r = JSCallerSavedCode(i); Register reg = { r };
Register reg = { r }; if ((non_object_regs & (1 << r)) != 0) {
if ((non_object_regs & (1 << r)) != 0) { if (FLAG_debug_code) {
if (FLAG_debug_code) { __ tst(reg, Operand(0xc0000000));
__ tst(reg, Operand(0xc0000000)); __ Assert(eq, "Unable to encode value as smi");
__ Assert(eq, "Unable to encode value as smi");
}
__ mov(reg, Operand(reg, LSL, kSmiTagSize));
} }
__ mov(reg, Operand(reg, LSL, kSmiTagSize));
} }
__ stm(db_w, sp, object_regs | non_object_regs);
} }
__ stm(db_w, sp, object_regs | non_object_regs);
}
#ifdef DEBUG #ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over"); __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif #endif
__ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break(masm->isolate()))); __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(1); CEntryStub ceb(1);
__ CallStub(&ceb); __ CallStub(&ceb);
// Restore the register values from the expression stack. // Restore the register values from the expression stack.
if ((object_regs | non_object_regs) != 0) { if ((object_regs | non_object_regs) != 0) {
__ ldm(ia_w, sp, object_regs | non_object_regs); __ ldm(ia_w, sp, object_regs | non_object_regs);
for (int i = 0; i < kNumJSCallerSaved; i++) { for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i); int r = JSCallerSavedCode(i);
Register reg = { r }; Register reg = { r };
if ((non_object_regs & (1 << r)) != 0) { if ((non_object_regs & (1 << r)) != 0) {
__ mov(reg, Operand(reg, LSR, kSmiTagSize)); __ mov(reg, Operand(reg, LSR, kSmiTagSize));
} }
if (FLAG_debug_code && if (FLAG_debug_code &&
(((object_regs |non_object_regs) & (1 << r)) == 0)) { (((object_regs |non_object_regs) & (1 << r)) == 0)) {
__ mov(reg, Operand(kDebugZapValue)); __ mov(reg, Operand(kDebugZapValue));
}
} }
} }
// Leave the internal frame.
} }
__ LeaveInternalFrame();
// Now that the break point has been handled, resume normal execution by // Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was // jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX. // overwritten by the address of DebugBreakXXX.

34
deps/v8/src/arm/deoptimizer-arm.cc

@ -112,19 +112,12 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
} }
#endif #endif
Isolate* isolate = code->GetIsolate();
// Add the deoptimizing code to the list. // Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
DeoptimizerData* data = isolate->deoptimizer_data(); DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
node->set_next(data->deoptimizing_code_list_); node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node; data->deoptimizing_code_list_ = node;
// We might be in the middle of incremental marking with compaction.
// Tell collector to treat this code object in a special way and
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
// Set the code for the function to non-optimized version. // Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code()); function->ReplaceCode(function->shared()->code());
@ -141,8 +134,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
} }
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Address pc_after,
Code* check_code, Code* check_code,
Code* replacement_code) { Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize; const int kInstrSize = Assembler::kInstrSize;
@ -177,13 +169,6 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
reinterpret_cast<uint32_t>(check_code->entry())); reinterpret_cast<uint32_t>(check_code->entry()));
Memory::uint32_at(stack_check_address_pointer) = Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry()); reinterpret_cast<uint32_t>(replacement_code->entry());
RelocInfo rinfo(pc_after - 2 * kInstrSize,
RelocInfo::CODE_TARGET,
0,
unoptimized_code);
unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
unoptimized_code, &rinfo, replacement_code);
} }
@ -208,9 +193,6 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
reinterpret_cast<uint32_t>(replacement_code->entry())); reinterpret_cast<uint32_t>(replacement_code->entry()));
Memory::uint32_at(stack_check_address_pointer) = Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(check_code->entry()); reinterpret_cast<uint32_t>(check_code->entry());
check_code->GetHeap()->incremental_marking()->
RecordCodeTargetPatch(pc_after - 2 * kInstrSize, check_code);
} }
@ -650,10 +632,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(r5, Operand(ExternalReference::isolate_address())); __ mov(r5, Operand(ExternalReference::isolate_address()));
__ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate. __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New(). // Call Deoptimizer::New().
{ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
}
// Preserve "deoptimizer" object in register r0 and get the input // Preserve "deoptimizer" object in register r0 and get the input
// frame descriptor pointer to r1 (deoptimizer->input_); // frame descriptor pointer to r1 (deoptimizer->input_);
@ -707,11 +686,8 @@ void Deoptimizer::EntryGenerator::Generate() {
// r0: deoptimizer object; r1: scratch. // r0: deoptimizer object; r1: scratch.
__ PrepareCallCFunction(1, r1); __ PrepareCallCFunction(1, r1);
// Call Deoptimizer::ComputeOutputFrames(). // Call Deoptimizer::ComputeOutputFrames().
{ __ CallCFunction(
AllowExternalCallThatCantCauseGC scope(masm()); ExternalReference::compute_output_frames_function(isolate), 1);
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate), 1);
}
__ pop(r0); // Restore deoptimizer object (class Deoptimizer). __ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames. // Replace the current (input) frame with the output frames.

10
deps/v8/src/arm/frames-arm.h

@@ -70,16 +70,6 @@ static const RegList kCalleeSaved =
  1 << 10 |  //  r10 v7
  1 << 11;   //  r11 v8 (fp in JavaScript code)
// When calling into C++ (only for C++ calls that can't cause a GC).
// The call code will take care of lr, fp, etc.
static const RegList kCallerSaved =
1 << 0 | // r0
1 << 1 | // r1
1 << 2 | // r2
1 << 3 | // r3
1 << 9; // r9
static const int kNumCalleeSaved = 7 + kR9Available;

// Double registers d8 to d15 are callee-saved.
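kCallerSaved above is an ordinary RegList bitmask keyed by register code. A tiny self-contained sketch of building and querying such a mask; RegList is assumed here to be a plain unsigned integer:

#include <cstdint>
#include <cstdio>

using RegList = uint32_t;

// r0-r3 plus r9, matching the removed kCallerSaved definition.
constexpr RegList kCallerSaved =
    (1u << 0) | (1u << 1) | (1u << 2) | (1u << 3) | (1u << 9);

bool IsCallerSaved(int reg_code) {
  return (kCallerSaved & (1u << reg_code)) != 0;
}

int main() {
  for (int r = 0; r < 16; r++) {
    if (IsCallerSaved(r)) std::printf("r%d is caller-saved\n", r);
  }
  return 0;
}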

195
deps/v8/src/arm/full-codegen-arm.cc

@ -39,7 +39,6 @@
#include "stub-cache.h" #include "stub-cache.h"
#include "arm/code-stubs-arm.h" #include "arm/code-stubs-arm.h"
#include "arm/macro-assembler-arm.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -156,11 +155,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok); __ bind(&ok);
} }
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
int locals_count = info->scope()->num_stack_slots(); int locals_count = info->scope()->num_stack_slots();
__ Push(lr, fp, cp, r1); __ Push(lr, fp, cp, r1);
@ -206,12 +200,13 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Load parameter from stack. // Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset)); __ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context. // Store it in the context.
MemOperand target = ContextOperand(cp, var->index()); __ mov(r1, Operand(Context::SlotOffset(var->index())));
__ str(r0, target); __ str(r0, MemOperand(cp, r1));
// Update the write barrier. This clobbers all involved
// Update the write barrier. // registers, so we have to use two more registers to avoid
__ RecordWriteContextSlot( // clobbering cp.
cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs); __ mov(r2, Operand(cp));
__ RecordWrite(r2, Operand(r1), r3, r0);
} }
} }
} }
@ -670,15 +665,12 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch1.is(src)); ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0); MemOperand location = VarOperand(var, scratch0);
__ str(src, location); __ str(src, location);
// Emit the write barrier code if the location is in the heap. // Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) { if (var->IsContextSlot()) {
__ RecordWriteContextSlot(scratch0, __ RecordWrite(scratch0,
location.offset(), Operand(Context::SlotOffset(var->index())),
src, scratch1,
scratch1, src);
kLRHasBeenSaved,
kDontSaveFPRegs);
} }
} }
@ -754,14 +746,8 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
__ str(result_register(), ContextOperand(cp, variable->index())); __ str(result_register(), ContextOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index()); int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi. // We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp, __ mov(r1, Operand(cp));
offset, __ RecordWrite(r1, Operand(offset), r2, result_register());
result_register(),
r2,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS); PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
} else if (mode == Variable::CONST || mode == Variable::LET) { } else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration"); Comment cmnt(masm_, "[ Declaration");
@ -1225,17 +1211,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == Variable::DYNAMIC_LOCAL) { } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed(); Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow)); __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
if (local->mode() == Variable::CONST || if (local->mode() == Variable::CONST) {
local->mode() == Variable::LET) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex); __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
if (local->mode() == Variable::CONST) { __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
} else { // Variable::LET
__ b(ne, done);
__ mov(r0, Operand(var->name()));
__ push(r0);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
}
} }
__ jmp(done); __ jmp(done);
} }
@ -1512,23 +1490,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr); VisitForAccumulatorValue(subexpr);
// Store the subexpression value in the array's elements. // Store the subexpression value in the array's elements.
__ ldr(r6, MemOperand(sp)); // Copy of array literal. __ ldr(r1, MemOperand(sp)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset)); __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize); int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ str(result_register(), FieldMemOperand(r1, offset)); __ str(result_register(), FieldMemOperand(r1, offset));
Label no_map_change;
__ JumpIfSmi(result_register(), &no_map_change);
// Update the write barrier for the array store with r0 as the scratch // Update the write barrier for the array store with r0 as the scratch
// register. // register.
__ RecordWriteField( __ RecordWrite(r1, Operand(offset), r2, result_register());
r1, offset, result_register(), r2, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ CheckFastSmiOnlyElements(r3, r2, &no_map_change);
__ push(r6); // Copy of array literal.
__ CallRuntime(Runtime::kNonSmiElementStored, 1);
__ bind(&no_map_change);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS); PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
} }
@ -1900,8 +1869,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// RecordWrite may destroy all its register arguments. // RecordWrite may destroy all its register arguments.
__ mov(r3, result_register()); __ mov(r3, result_register());
int offset = Context::SlotOffset(var->index()); int offset = Context::SlotOffset(var->index());
__ RecordWriteContextSlot( __ RecordWrite(r1, Operand(offset), r2, r3);
r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
} }
} }
@ -1919,9 +1887,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ str(r0, location); __ str(r0, location);
if (var->IsContextSlot()) { if (var->IsContextSlot()) {
__ mov(r3, r0); __ mov(r3, r0);
int offset = Context::SlotOffset(var->index()); __ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3);
__ RecordWriteContextSlot(
r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
} }
} else { } else {
ASSERT(var->IsLookupSlot()); ASSERT(var->IsLookupSlot());
@ -2696,24 +2662,20 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS // Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class. // functions to make sure they have 'Function' as their class.
// Assume that there are only two callable types, and one of them is at
// either end of the type range for JS object types. Saves extra comparisons.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE); __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
// Map is now in r0. // Map is now in r0.
__ b(lt, &null); __ b(lt, &null);
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
FIRST_SPEC_OBJECT_TYPE + 1); // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
__ b(eq, &function); // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
// LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
__ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE)); STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
LAST_SPEC_OBJECT_TYPE - 1); LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
__ b(eq, &function); __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
// Assume that there is no larger type. __ b(ge, &function);
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a function.
// Check if the constructor in the map is a JS function.
__ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset)); __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE); __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
__ b(ne, &non_function_constructor); __ b(ne, &non_function_constructor);
@ -2891,9 +2853,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset)); __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier. Save the value as it will be // Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward. // overwritten by the write barrier code and is needed afterward.
__ mov(r2, r0); __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
__ RecordWriteField(
r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
__ bind(&done); __ bind(&done);
context()->Plug(r0); context()->Plug(r0);
@ -3181,31 +3141,16 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ str(scratch1, MemOperand(index2, 0)); __ str(scratch1, MemOperand(index2, 0));
__ str(scratch2, MemOperand(index1, 0)); __ str(scratch2, MemOperand(index1, 0));
Label no_remembered_set; Label new_space;
__ CheckPageFlag(elements, __ InNewSpace(elements, scratch1, eq, &new_space);
scratch1,
1 << MemoryChunk::SCAN_ON_SCAVENGE,
ne,
&no_remembered_set);
// Possible optimization: do a check that both values are Smis // Possible optimization: do a check that both values are Smis
// (or them and test against Smi mask.) // (or them and test against Smi mask.)
// We are swapping two objects in an array and the incremental marker never __ mov(scratch1, elements);
// pauses in the middle of scanning a single object. Therefore the __ RecordWriteHelper(elements, index1, scratch2);
// incremental marker is not disturbed, so we don't need to call the __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements.
// RecordWrite stub that notifies the incremental marker.
__ RememberedSetHelper(elements,
index1,
scratch2,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ RememberedSetHelper(elements,
index2,
scratch2,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ bind(&no_remembered_set); __ bind(&new_space);
// We are done. Drop elements from the stack, and return undefined. // We are done. Drop elements from the stack, and return undefined.
__ Drop(3); __ Drop(3);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex); __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@ -3953,14 +3898,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Handle<String> check) { Handle<String> check,
Label materialize_true, materialize_false; Label* if_true,
Label* if_true = NULL; Label* if_false,
Label* if_false = NULL; Label* fall_through) {
Label* fall_through = NULL;
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
{ AccumulatorValueContext context(this); { AccumulatorValueContext context(this);
VisitForTypeofValue(expr); VisitForTypeofValue(expr);
} }
@ -4001,11 +3942,9 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (check->Equals(isolate()->heap()->function_symbol())) { } else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(r0, if_false); __ JumpIfSmi(r0, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
__ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE); Split(ge, if_true, if_false, fall_through);
__ b(eq, if_true);
__ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
Split(eq, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) { } else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(r0, if_false); __ JumpIfSmi(r0, if_false);
if (!FLAG_harmony_typeof) { if (!FLAG_harmony_typeof) {
@ -4024,7 +3963,18 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else { } else {
if (if_false != fall_through) __ jmp(if_false); if (if_false != fall_through) __ jmp(if_false);
} }
context()->Plug(if_true, if_false); }
void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
Label* if_true,
Label* if_false,
Label* fall_through) {
VisitForAccumulatorValue(expr);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
Split(eq, if_true, if_false, fall_through);
} }
@ -4032,12 +3982,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation"); Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
if (TryLiteralCompare(expr)) return;
// Always perform the comparison for its control flow. Pack the result // Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed. // into the expression's context after the comparison is performed.
Label materialize_true, materialize_false; Label materialize_true, materialize_false;
Label* if_true = NULL; Label* if_true = NULL;
Label* if_false = NULL; Label* if_false = NULL;
@ -4045,6 +3992,13 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false, context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through); &if_true, &if_false, &fall_through);
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
Token::Value op = expr->op(); Token::Value op = expr->op();
VisitForStackValue(expr->left()); VisitForStackValue(expr->left());
switch (op) { switch (op) {
@ -4131,9 +4085,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
} }
void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
Expression* sub_expr, Comment cmnt(masm_, "[ CompareToNull");
NilValue nil) {
Label materialize_true, materialize_false; Label materialize_true, materialize_false;
Label* if_true = NULL; Label* if_true = NULL;
Label* if_false = NULL; Label* if_false = NULL;
@ -4141,21 +4094,15 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
context()->PrepareTest(&materialize_true, &materialize_false, context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through); &if_true, &if_false, &fall_through);
VisitForAccumulatorValue(sub_expr); VisitForAccumulatorValue(expr->expression());
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Heap::RootListIndex nil_value = nil == kNullValue ? __ LoadRoot(r1, Heap::kNullValueRootIndex);
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex;
__ LoadRoot(r1, nil_value);
__ cmp(r0, r1); __ cmp(r0, r1);
if (expr->op() == Token::EQ_STRICT) { if (expr->is_strict()) {
Split(eq, if_true, if_false, fall_through); Split(eq, if_true, if_false, fall_through);
} else { } else {
Heap::RootListIndex other_nil_value = nil == kNullValue ?
Heap::kUndefinedValueRootIndex :
Heap::kNullValueRootIndex;
__ b(eq, if_true); __ b(eq, if_true);
__ LoadRoot(r1, other_nil_value); __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ cmp(r0, r1); __ cmp(r0, r1);
__ b(eq, if_true); __ b(eq, if_true);
__ JumpIfSmi(r0, if_false); __ JumpIfSmi(r0, if_false);
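
Aside: most of the full-codegen churn above comes from swapping the incremental-marking write barrier of 3.6.6 (RecordWriteField / RecordWriteContextSlot with kLRHasBeenSaved and kDontSaveFPRegs) back to the older region-marking RecordWrite(object, Operand(offset), scratch, value). The snippet below is a minimal stand-alone model of that older region-marking idea, not part of this commit; the page size, region size, and struct layout are invented for illustration and do not match V8's real memory layout.

    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kPageSize   = 1 << 13;  // assumed 8 KB page
    constexpr uintptr_t kRegionSize = 1 << 8;   // assumed 256-byte dirty regions

    struct Page {
      uint32_t dirty_regions;                   // one bit per region in the page
      uint8_t payload[kPageSize - sizeof(uint32_t)];
    };

    // Mark the region containing 'slot' dirty so the next scavenge rescans it,
    // mirroring what the restored RecordWriteHelper does with Bfc/Ubfx and the
    // page's dirty-flag word.
    void RecordWriteToy(Page* page, const void* slot) {
      uintptr_t offset =
          reinterpret_cast<uintptr_t>(slot) - reinterpret_cast<uintptr_t>(page);
      page->dirty_regions |= 1u << (offset / kRegionSize);
    }

    int main() {
      Page page = {};
      RecordWriteToy(&page, &page.payload[3 * kRegionSize]);
      std::printf("dirty bitmap: %#x\n", page.dirty_regions);  // prints 0x8
    }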

149
deps/v8/src/arm/ic-arm.cc

@ -208,8 +208,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update the write barrier. Make sure not to clobber the value. // Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value); __ mov(scratch1, value);
__ RecordWrite( __ RecordWrite(elements, scratch2, scratch1);
elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
} }
@ -505,22 +504,21 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack. // Get the receiver of the function from the stack.
__ ldr(r3, MemOperand(sp, argc * kPointerSize)); __ ldr(r3, MemOperand(sp, argc * kPointerSize));
{ __ EnterInternalFrame();
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the receiver and the name of the function. // Push the receiver and the name of the function.
__ Push(r3, r2); __ Push(r3, r2);
// Call the entry. // Call the entry.
__ mov(r0, Operand(2)); __ mov(r0, Operand(2));
__ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate))); __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
CEntryStub stub(1); CEntryStub stub(1);
__ CallStub(&stub); __ CallStub(&stub);
// Move result to r1 and leave the internal frame. // Move result to r1 and leave the internal frame.
__ mov(r1, Operand(r0)); __ mov(r1, Operand(r0));
} __ LeaveInternalFrame();
// Check if the receiver is a global object of some sort. // Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC. // This can happen only for regular CallIC but not KeyedCallIC.
@ -652,13 +650,12 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required // This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial. // nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3); __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
{ __ EnterInternalFrame();
FrameScope scope(masm, StackFrame::INTERNAL); __ push(r2); // save the key
__ push(r2); // save the key __ Push(r1, r2); // pass the receiver and the key
__ Push(r1, r2); // pass the receiver and the key __ CallRuntime(Runtime::kKeyedGetProperty, 2);
__ CallRuntime(Runtime::kKeyedGetProperty, 2); __ pop(r2); // restore the key
__ pop(r2); // restore the key __ LeaveInternalFrame();
}
__ mov(r1, r0); __ mov(r1, r0);
__ jmp(&do_call); __ jmp(&do_call);
@ -911,8 +908,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow); GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
__ str(r0, mapped_location); __ str(r0, mapped_location);
__ add(r6, r3, r5); __ add(r6, r3, r5);
__ mov(r9, r0); __ RecordWrite(r3, r6, r9);
__ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret(); __ Ret();
__ bind(&notin); __ bind(&notin);
// The unmapped lookup expects that the parameter map is in r3. // The unmapped lookup expects that the parameter map is in r3.
@ -920,8 +916,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow); GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
__ str(r0, unmapped_location); __ str(r0, unmapped_location);
__ add(r6, r3, r4); __ add(r6, r3, r4);
__ mov(r9, r0); __ RecordWrite(r3, r6, r9);
__ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret(); __ Ret();
__ bind(&slow); __ bind(&slow);
GenerateMiss(masm, false); GenerateMiss(masm, false);
@ -1272,17 +1267,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- r2 : receiver // -- r2 : receiver
// -- lr : return address // -- lr : return address
// ----------------------------------- // -----------------------------------
Label slow, array, extra, check_if_double_array; Label slow, fast, array, extra;
Label fast_object_with_map_check, fast_object_without_map_check;
Label fast_double_with_map_check, fast_double_without_map_check;
// Register usage. // Register usage.
Register value = r0; Register value = r0;
Register key = r1; Register key = r1;
Register receiver = r2; Register receiver = r2;
Register elements = r3; // Elements array of the receiver. Register elements = r3; // Elements array of the receiver.
Register elements_map = r6;
Register receiver_map = r7;
// r4 and r5 are used as general scratch registers. // r4 and r5 are used as general scratch registers.
// Check that the key is a smi. // Check that the key is a smi.
@ -1290,26 +1281,35 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// Check that the object isn't a smi. // Check that the object isn't a smi.
__ JumpIfSmi(receiver, &slow); __ JumpIfSmi(receiver, &slow);
// Get the map of the object. // Get the map of the object.
__ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need // Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks. // to do this because this generic stub does not perform map checks.
__ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow); __ b(ne, &slow);
// Check if the object is a JS array or not. // Check if the object is a JS array or not.
__ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ cmp(r4, Operand(JS_ARRAY_TYPE)); __ cmp(r4, Operand(JS_ARRAY_TYPE));
__ b(eq, &array); __ b(eq, &array);
// Check that the object is some kind of JSObject. // Check that the object is some kind of JSObject.
__ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
__ b(lt, &slow); __ b(lt, &slow);
__ cmp(r4, Operand(JS_PROXY_TYPE));
__ b(eq, &slow);
__ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(eq, &slow);
// Object case: Check key against length in the elements array. // Object case: Check key against length in the elements array.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode and writable.
__ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
__ b(ne, &slow);
// Check array bounds. Both the key and the length of FixedArray are smis. // Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip)); __ cmp(key, Operand(ip));
__ b(lo, &fast_object_with_map_check); __ b(lo, &fast);
// Slow case, handle jump to runtime. // Slow case, handle jump to runtime.
__ bind(&slow); __ bind(&slow);
@ -1330,31 +1330,21 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(key, Operand(ip)); __ cmp(key, Operand(ip));
__ b(hs, &slow); __ b(hs, &slow);
__ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ cmp(elements_map,
Operand(masm->isolate()->factory()->fixed_array_map()));
__ b(ne, &check_if_double_array);
// Calculate key + 1 as smi. // Calculate key + 1 as smi.
STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ add(r4, key, Operand(Smi::FromInt(1))); __ add(r4, key, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ b(&fast_object_without_map_check); __ b(&fast);
__ bind(&check_if_double_array);
__ cmp(elements_map,
Operand(masm->isolate()->factory()->fixed_double_array_map()));
__ b(ne, &slow);
// Add 1 to key, and go to common element store code for doubles.
STATIC_ASSERT(kSmiTag == 0);
__ add(r4, key, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ jmp(&fast_double_without_map_check);
// Array case: Get the length and the elements array from the JS // Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it // array. Check that the array is in fast mode (and writable); if it
// is the length is always a smi. // is the length is always a smi.
__ bind(&array); __ bind(&array);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
__ b(ne, &slow);
// Check the key against the length in the array. // Check the key against the length in the array.
__ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
@ -1362,57 +1352,18 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ b(hs, &extra); __ b(hs, &extra);
// Fall through to fast case. // Fall through to fast case.
__ bind(&fast_object_with_map_check); __ bind(&fast);
Register scratch_value = r4; // Fast case, store the value to the elements backing store.
Register address = r5; __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ cmp(elements_map, __ str(value, MemOperand(r5));
Operand(masm->isolate()->factory()->fixed_array_map())); // Skip write barrier if the written value is a smi.
__ b(ne, &fast_double_with_map_check); __ tst(value, Operand(kSmiTagMask));
__ bind(&fast_object_without_map_check); __ Ret(eq);
// Smi stores don't require further checks.
Label non_smi_value;
__ JumpIfNotSmi(value, &non_smi_value);
// It's irrelevant whether array is smi-only or not when writing a smi.
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value, MemOperand(address));
__ Ret();
__ bind(&non_smi_value);
// Escape to slow case when writing non-smi into smi-only array.
__ CheckFastObjectElements(receiver_map, scratch_value, &slow);
// Fast elements array, store the value to the elements backing store.
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value, MemOperand(address));
// Update write barrier for the elements array address. // Update write barrier for the elements array address.
__ mov(scratch_value, value); // Preserve the value which is returned. __ sub(r4, r5, Operand(elements));
__ RecordWrite(elements, __ RecordWrite(elements, Operand(r4), r5, r6);
address,
scratch_value,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Ret();
__ bind(&fast_double_with_map_check);
// Check for fast double array case. If this fails, call through to the
// runtime.
__ cmp(elements_map,
Operand(masm->isolate()->factory()->fixed_double_array_map()));
__ b(ne, &slow);
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
receiver,
elements,
r4,
r5,
r6,
r7,
&slow);
__ Ret(); __ Ret();
} }
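
Aside: the KeyedStoreIC::GenerateGeneric rewrite above drops the 3.6.6 handling of smi-only and double element kinds and goes back to a single fast path: smi key, plain FixedArray backing store, in-bounds index, and a write barrier only for non-smi values. Below is a rough sketch of that restored decision structure; the types and names are simplified stand-ins, not V8's.

    #include <cstddef>
    #include <vector>

    struct Value { bool is_smi; };

    enum class StoreResult { kFastSmi, kFastWithBarrier, kSlow };

    // Mirrors the shape of the restored fast path: bail to the runtime for
    // anything that is not a simple in-bounds store into a plain backing store.
    StoreResult KeyedStore(std::vector<Value>& elements, long key, Value value) {
      if (key < 0) return StoreResult::kSlow;             // negative stands in for a non-smi key
      if (static_cast<std::size_t>(key) >= elements.size())
        return StoreResult::kSlow;                        // out of bounds: grow the array or miss
      elements[static_cast<std::size_t>(key)] = value;    // the actual store
      return value.is_smi ? StoreResult::kFastSmi         // smis never need a barrier
                          : StoreResult::kFastWithBarrier;// heap pointer: record the write
    }

    int main() {
      std::vector<Value> elements(4, Value{true});
      return KeyedStore(elements, 2, Value{false}) == StoreResult::kFastWithBarrier ? 0 : 1;
    }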

38
deps/v8/src/arm/lithium-arm.cc

@@ -212,11 +212,10 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
 }
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   InputAt(0)->PrintTo(stream);
-  stream->Add(kind() == kStrictEquality ? " === " : " == ");
-  stream->Add(nil() == kNullValue ? "null" : "undefined");
+  stream->Add(is_strict() ? " === null" : " == null");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
@@ -712,9 +711,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  int argument_index_accumulator = 0;
-  instr->set_environment(CreateEnvironment(hydrogen_env,
-                                           &argument_index_accumulator));
+  instr->set_environment(CreateEnvironment(hydrogen_env));
   return instr;
 }
@ -997,13 +994,10 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
} }
LEnvironment* LChunkBuilder::CreateEnvironment( LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
HEnvironment* hydrogen_env,
int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL; if (hydrogen_env == NULL) return NULL;
LEnvironment* outer = LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id(); int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber); ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length(); int value_count = hydrogen_env->length();
@ -1013,6 +1007,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
argument_count_, argument_count_,
value_count, value_count,
outer); outer);
int argument_index = 0;
for (int i = 0; i < value_count; ++i) { for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue; if (hydrogen_env->is_special_index(i)) continue;
@ -1021,7 +1016,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
if (value->IsArgumentsObject()) { if (value->IsArgumentsObject()) {
op = NULL; op = NULL;
} else if (value->IsPushArgument()) { } else if (value->IsPushArgument()) {
op = new LArgument((*argument_index_accumulator)++); op = new LArgument(argument_index++);
} else { } else {
op = UseAny(value); op = UseAny(value);
} }
@ -1449,9 +1444,9 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
} }
LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) { LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged()); ASSERT(instr->value()->representation().IsTagged());
return new LIsNilAndBranch(UseRegisterAtStart(instr->value())); return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
} }
@@ -1739,7 +1734,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
 LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
   LLoadGlobalCell* result = new LLoadGlobalCell;
-  return instr->RequiresHoleCheck()
+  return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
@ -1753,11 +1748,14 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LOperand* temp = TempRegister(); if (instr->check_hole_value()) {
LOperand* value = UseTempRegister(instr->value()); LOperand* temp = TempRegister();
LInstruction* result = new LStoreGlobalCell(value, temp); LOperand* value = UseRegister(instr->value());
if (instr->RequiresHoleCheck()) result = AssignEnvironment(result); return AssignEnvironment(new LStoreGlobalCell(value, temp));
return result; } else {
LOperand* value = UseRegisterAtStart(instr->value());
return new LStoreGlobalCell(value, NULL);
}
} }
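
Aside: the CreateEnvironment change above is about how argument slots get numbered when environments are chained. The 3.6.6 code threads one accumulator through the recursion so indices stay unique across outer (inlined) environments, while the restored code restarts a local counter per environment. A small stand-alone sketch of the two schemes follows; the Env type is a made-up stand-in for HEnvironment.

    #include <cstdio>

    struct Env { int num_arguments; const Env* outer; };

    // Removed (3.6.6) variant: indices grow monotonically across the outer chain.
    void NumberAccumulated(const Env* e, int* accumulator) {
      if (e == nullptr) return;
      NumberAccumulated(e->outer, accumulator);     // outer frames are numbered first
      for (int i = 0; i < e->num_arguments; ++i)
        std::printf("accumulated index %d\n", (*accumulator)++);
    }

    // Restored variant: each environment numbers its own arguments from zero.
    void NumberPerEnvironment(const Env* e) {
      if (e == nullptr) return;
      NumberPerEnvironment(e->outer);
      int argument_index = 0;
      for (int i = 0; i < e->num_arguments; ++i)
        std::printf("local index %d\n", argument_index++);
    }

    int main() {
      Env outer{2, nullptr}, inner{1, &outer};
      int acc = 0;
      NumberAccumulated(&inner, &acc);   // prints 0, 1, 2
      NumberPerEnvironment(&inner);      // prints 0, 1, 0
    }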

16
deps/v8/src/arm/lithium-arm.h

@ -107,7 +107,7 @@ class LCodeGen;
V(Integer32ToDouble) \ V(Integer32ToDouble) \
V(InvokeFunction) \ V(InvokeFunction) \
V(IsConstructCallAndBranch) \ V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \ V(IsNullAndBranch) \
V(IsObjectAndBranch) \ V(IsObjectAndBranch) \
V(IsSmiAndBranch) \ V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \ V(IsUndetectableAndBranch) \
@@ -627,17 +627,16 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
 };
-class LIsNilAndBranch: public LControlInstruction<1, 0> {
+class LIsNullAndBranch: public LControlInstruction<1, 0> {
  public:
-  explicit LIsNilAndBranch(LOperand* value) {
+  explicit LIsNullAndBranch(LOperand* value) {
     inputs_[0] = value;
   }
-  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
-  EqualityKind kind() const { return hydrogen()->kind(); }
-  NilValue nil() const { return hydrogen()->nil(); }
+  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+  bool is_strict() const { return hydrogen()->is_strict(); }
   virtual void PrintDataTo(StringStream* stream);
 };
@ -2160,8 +2159,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr, int ast_id); LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment(); void ClearInstructionPendingDeoptimizationEnvironment();
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env, LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
int* argument_index_accumulator);
void VisitInstruction(HInstruction* current); void VisitInstruction(HInstruction* current);
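
Aside: LIsNilAndBranch (removed above) generalises the restored LIsNullAndBranch by parametrising both the nil constant (null or undefined) and the equality kind instead of hard-wiring null. The helper below sketches the intended semantics with a toy value model; it is not V8's object representation and it ignores undetectable objects, which the generated ARM code also has to handle.

    enum class Val  { kNull, kUndefined, kSmi, kObject };
    enum class Nil  { kNullValue, kUndefinedValue };
    enum class Kind { kStrictEquality, kNonStrictEquality };

    bool IsNil(Val v, Nil nil, Kind kind) {
      Val target = (nil == Nil::kNullValue) ? Val::kNull : Val::kUndefined;
      if (v == target) return true;                     // exact match always passes
      if (kind == Kind::kStrictEquality) return false;  // === accepts only the exact value
      // Non-strict: null and undefined compare equal to each other.
      return v == Val::kNull || v == Val::kUndefined;
    }

Under this model IsNil(Val::kUndefined, Nil::kNullValue, Kind::kNonStrictEquality) is true, which is exactly the extra LoadRoot/compare the non-strict code path emits.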

182
deps/v8/src/arm/lithium-codegen-arm.cc

@@ -82,14 +82,6 @@ bool LCodeGen::GenerateCode() {
   status_ = GENERATING;
   CpuFeatures::Scope scope1(VFP3);
   CpuFeatures::Scope scope2(ARMv7);
-  CodeStub::GenerateFPStubs();
-
-  // Open a frame scope to indicate that there is a frame on the stack. The
-  // NONE indicates that the scope shouldn't actually generate code to set up
-  // the frame (that is done in GeneratePrologue).
-  FrameScope frame_scope(masm_, StackFrame::NONE);
-
   return GeneratePrologue() &&
          GenerateBody() &&
          GenerateDeferredCode() &&
@ -214,11 +206,13 @@ bool LCodeGen::GeneratePrologue() {
// Load parameter from stack. // Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset)); __ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context. // Store it in the context.
MemOperand target = ContextOperand(cp, var->index()); __ mov(r1, Operand(Context::SlotOffset(var->index())));
__ str(r0, target); __ str(r0, MemOperand(cp, r1));
// Update the write barrier. This clobbers r3 and r0. // Update the write barrier. This clobbers all involved
__ RecordWriteContextSlot( // registers, so we have to use two more registers to avoid
cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs); // clobbering cp.
__ mov(r2, Operand(cp));
__ RecordWrite(r2, Operand(r1), r3, r0);
} }
} }
Comment(";;; End allocate local context"); Comment(";;; End allocate local context");
@ -268,9 +262,6 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i]; LDeferredCode* code = deferred_[i];
__ bind(code->entry()); __ bind(code->entry());
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate(); code->Generate();
__ jmp(code->exit()); __ jmp(code->exit());
} }
@ -748,7 +739,7 @@ void LCodeGen::RecordSafepoint(
int deoptimization_index) { int deoptimization_index) {
ASSERT(expected_safepoint_kind_ == kind); ASSERT(expected_safepoint_kind_ == kind);
const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(), Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index); kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) { for (int i = 0; i < operands->length(); i++) {
@ -1041,7 +1032,6 @@ void LCodeGen::DoDivI(LDivI* instr) {
virtual void Generate() { virtual void Generate() {
codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV); codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
} }
virtual LInstruction* instr() { return instr_; }
private: private:
LDivI* instr_; LDivI* instr_;
}; };
@ -1753,35 +1743,25 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
} }
void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) { void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register scratch = scratch0(); Register scratch = scratch0();
Register reg = ToRegister(instr->InputAt(0)); Register reg = ToRegister(instr->InputAt(0));
int false_block = chunk_->LookupDestination(instr->false_block_id());
// If the expression is known to be untagged or a smi, then it's definitely // TODO(fsc): If the expression is known to be a smi, then it's
// not null, and it can't be a an undetectable object. // definitely not null. Jump to the false block.
if (instr->hydrogen()->representation().IsSpecialization() ||
instr->hydrogen()->type().IsSmi()) {
EmitGoto(false_block);
return;
}
int true_block = chunk_->LookupDestination(instr->true_block_id()); int true_block = chunk_->LookupDestination(instr->true_block_id());
Heap::RootListIndex nil_value = instr->nil() == kNullValue ? int false_block = chunk_->LookupDestination(instr->false_block_id());
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex; __ LoadRoot(ip, Heap::kNullValueRootIndex);
__ LoadRoot(ip, nil_value);
__ cmp(reg, ip); __ cmp(reg, ip);
if (instr->kind() == kStrictEquality) { if (instr->is_strict()) {
EmitBranch(true_block, false_block, eq); EmitBranch(true_block, false_block, eq);
} else { } else {
Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
Heap::kUndefinedValueRootIndex :
Heap::kNullValueRootIndex;
Label* true_label = chunk_->GetAssemblyLabel(true_block); Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block); Label* false_label = chunk_->GetAssemblyLabel(false_block);
__ b(eq, true_label); __ b(eq, true_label);
__ LoadRoot(ip, other_nil_value); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(reg, ip); __ cmp(reg, ip);
__ b(eq, true_label); __ b(eq, true_label);
__ JumpIfSmi(reg, false_label); __ JumpIfSmi(reg, false_label);
@ -1938,36 +1918,28 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
ASSERT(!input.is(temp)); ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register. ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
__ JumpIfSmi(input, is_false); __ JumpIfSmi(input, is_false);
__ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, is_false);
// Map is now in temp.
// Functions have class 'Function'.
__ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) { if (class_name->IsEqualTo(CStrVector("Function"))) {
// Assuming the following assertions, we can use the same compares to test __ b(ge, is_true);
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
FIRST_SPEC_OBJECT_TYPE + 1);
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
LAST_SPEC_OBJECT_TYPE - 1);
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, is_false);
__ b(eq, is_true);
__ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
__ b(eq, is_true);
} else { } else {
// Faster code path to avoid two compares: subtract lower bound from the __ b(ge, is_false);
// actual type and do a signed compare with the width of the type range.
__ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
__ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
__ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ b(gt, is_false);
} }
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function. // Check if the constructor in the map is a function.
__ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset)); __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
// As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
// FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
// LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'. // Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE); __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
if (class_name->IsEqualTo(CStrVector("Object"))) { if (class_name->IsEqualTo(CStrVector("Object"))) {
@ -2044,8 +2016,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
virtual void Generate() { virtual void Generate() {
codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
} }
virtual LInstruction* instr() { return instr_; }
Label* map_check() { return &map_check_; } Label* map_check() { return &map_check_; }
private: private:
LInstanceOfKnownGlobal* instr_; LInstanceOfKnownGlobal* instr_;
Label map_check_; Label map_check_;
@ -2207,7 +2180,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result()); Register result = ToRegister(instr->result());
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
__ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) { if (instr->hydrogen()->check_hole_value()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip); __ cmp(result, ip);
DeoptimizeIf(eq, instr->environment()); DeoptimizeIf(eq, instr->environment());
@ -2230,7 +2203,6 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register value = ToRegister(instr->InputAt(0)); Register value = ToRegister(instr->InputAt(0));
Register scratch = scratch0(); Register scratch = scratch0();
Register scratch2 = ToRegister(instr->TempAt(0));
// Load the cell. // Load the cell.
__ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell()))); __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
@ -2239,7 +2211,8 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// been deleted from the property dictionary. In that case, we need // been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark // to update the property details in the property dictionary to mark
// it as no longer deleted. // it as no longer deleted.
if (instr->hydrogen()->RequiresHoleCheck()) { if (instr->hydrogen()->check_hole_value()) {
Register scratch2 = ToRegister(instr->TempAt(0));
__ ldr(scratch2, __ ldr(scratch2,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@ -2249,15 +2222,6 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value. // Store the value.
__ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
// Cells are always in the remembered set.
__ RecordWriteField(scratch,
JSGlobalPropertyCell::kValueOffset,
value,
scratch2,
kLRHasBeenSaved,
kSaveFPRegs,
OMIT_REMEMBERED_SET);
} }
@ -2283,15 +2247,10 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context()); Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value()); Register value = ToRegister(instr->value());
MemOperand target = ContextOperand(context, instr->slot_index()); __ str(value, ContextOperand(context, instr->slot_index()));
__ str(value, target);
if (instr->needs_write_barrier()) { if (instr->needs_write_barrier()) {
__ RecordWriteContextSlot(context, int offset = Context::SlotOffset(instr->slot_index());
target.offset(), __ RecordWrite(context, Operand(offset), value, scratch0());
value,
scratch0(),
kLRHasBeenSaved,
kSaveFPRegs);
} }
} }
@ -2541,9 +2500,13 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} }
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); if (instr->hydrogen()->RequiresHoleCheck()) {
__ cmp(scratch, Operand(kHoleNanUpper32)); // TODO(danno): If no hole check is required, there is no need to allocate
DeoptimizeIf(eq, instr->environment()); // elements into a temporary register, instead scratch can be used.
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
__ vldr(result, elements, 0); __ vldr(result, elements, 0);
} }
@ -2614,7 +2577,6 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE(); UNREACHABLE();
@ -2944,7 +2906,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
virtual void Generate() { virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
} }
virtual LInstruction* instr() { return instr_; }
private: private:
LUnaryMathOperation* instr_; LUnaryMathOperation* instr_;
}; };
@ -3241,7 +3202,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(r0)); ASSERT(ToRegister(instr->result()).is(r0));
int arity = instr->arity(); int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Drop(1); __ Drop(1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -3301,8 +3262,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ str(value, FieldMemOperand(object, offset)); __ str(value, FieldMemOperand(object, offset));
if (instr->needs_write_barrier()) { if (instr->needs_write_barrier()) {
// Update the write barrier for the object for in-object properties. // Update the write barrier for the object for in-object properties.
__ RecordWriteField( __ RecordWrite(object, Operand(offset), value, scratch);
object, offset, value, scratch, kLRHasBeenSaved, kSaveFPRegs);
} }
} else { } else {
__ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
@ -3310,8 +3270,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) { if (instr->needs_write_barrier()) {
// Update the write barrier for the properties array. // Update the write barrier for the properties array.
// object is used as a scratch register. // object is used as a scratch register.
__ RecordWriteField( __ RecordWrite(scratch, Operand(offset), value, object);
scratch, offset, value, object, kLRHasBeenSaved, kSaveFPRegs);
} }
} }
} }
@ -3342,13 +3301,6 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Register scratch = scratch0(); Register scratch = scratch0();
// This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
// conversion, so it deopts in that case.
if (instr->hydrogen()->ValueNeedsSmiCheck()) {
__ tst(value, Operand(kSmiTagMask));
DeoptimizeIf(ne, instr->environment());
}
// Do the store. // Do the store.
if (instr->key()->IsConstantOperand()) { if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@ -3363,8 +3315,8 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
if (instr->hydrogen()->NeedsWriteBarrier()) { if (instr->hydrogen()->NeedsWriteBarrier()) {
// Compute address of modified element and store it into key register. // Compute address of modified element and store it into key register.
__ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ add(key, scratch, Operand(FixedArray::kHeaderSize));
__ RecordWrite(elements, key, value, kLRHasBeenSaved, kSaveFPRegs); __ RecordWrite(elements, key, value);
} }
} }
@ -3465,7 +3417,6 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE(); UNREACHABLE();
@ -3501,7 +3452,6 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { } : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
virtual LInstruction* instr() { return instr_; }
private: private:
LStringCharCodeAt* instr_; LStringCharCodeAt* instr_;
}; };
@ -3625,7 +3575,6 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { } : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
virtual LInstruction* instr() { return instr_; }
private: private:
LStringCharFromCode* instr_; LStringCharFromCode* instr_;
}; };
@ -3697,7 +3646,6 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { } : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
virtual LInstruction* instr() { return instr_; }
private: private:
LNumberTagI* instr_; LNumberTagI* instr_;
}; };
@ -3763,7 +3711,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { } : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
virtual LInstruction* instr() { return instr_; }
private: private:
LNumberTagD* instr_; LNumberTagD* instr_;
}; };
@ -3872,6 +3819,16 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
} }
class DeferredTaggedToI: public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
private:
LTaggedToI* instr_;
};
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register input_reg = ToRegister(instr->InputAt(0)); Register input_reg = ToRegister(instr->InputAt(0));
Register scratch1 = scratch0(); Register scratch1 = scratch0();
@ -3954,16 +3911,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) { void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
class DeferredTaggedToI: public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LTaggedToI* instr_;
};
LOperand* input = instr->InputAt(0); LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister()); ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result())); ASSERT(input->Equals(instr->result()));
@@ -4396,12 +4343,10 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
     final_branch_condition = ne;
   } else if (type_name->Equals(heap()->function_symbol())) {
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
     __ JumpIfSmi(input, false_label);
-    __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
-    __ b(eq, true_label);
-    __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
-    final_branch_condition = eq;
+    __ CompareObjectType(input, input, scratch,
+                         FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+    final_branch_condition = ge;
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
@ -4523,7 +4468,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { } : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
virtual LInstruction* instr() { return instr_; }
private: private:
LStackCheck* instr_; LStackCheck* instr_;
}; };
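
Aside: both EmitClassOfTest and EmitTypeofIs above switch between two ways of asking "is this object callable". The 3.6.6 code asserts there are exactly two callable instance types (JS_FUNCTION_TYPE and JS_FUNCTION_PROXY_TYPE) and compares against both, while the restored code relies on all callable types sitting at the top of the instance-type range so a single comparison against FIRST_CALLABLE_SPEC_OBJECT_TYPE suffices. The enum values below are invented for illustration; only the layout constraints matter.

    enum InstanceType {
      FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = 100,  // invented values; only the
      LAST_NONCALLABLE_SPEC_OBJECT_TYPE  = 110,  // ordering constraints matter
      FIRST_CALLABLE_SPEC_OBJECT_TYPE    = 111,
      JS_FUNCTION_PROXY_TYPE             = 111,
      JS_FUNCTION_TYPE                   = 112,
      LAST_CALLABLE_SPEC_OBJECT_TYPE     = 112,
      LAST_TYPE                          = 112
    };

    // Restored scheme: callables are exactly the tail of the type range,
    // so one comparison answers the question.
    static_assert(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE, "layout");
    static_assert(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1, "layout");
    inline bool IsCallableByRange(InstanceType t) {
      return t >= FIRST_CALLABLE_SPEC_OBJECT_TYPE;
    }

    // 3.6.6 scheme: exactly two callable types, test each one explicitly.
    inline bool IsCallableByEnumeration(InstanceType t) {
      return t == JS_FUNCTION_TYPE || t == JS_FUNCTION_PROXY_TYPE;
    }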

7
deps/v8/src/arm/lithium-codegen-arm.h

@@ -376,20 +376,16 @@ class LCodeGen BASE_EMBEDDED {
 class LDeferredCode: public ZoneObject {
  public:
   explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen),
-        external_exit_(NULL),
-        instruction_index_(codegen->current_instruction_) {
+      : codegen_(codegen), external_exit_(NULL) {
     codegen->AddDeferredCode(this);
   }
   virtual ~LDeferredCode() { }
   virtual void Generate() = 0;
-  virtual LInstruction* instr() = 0;
   void SetExit(Label *exit) { external_exit_ = exit; }
   Label* entry() { return &entry_; }
   Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
-  int instruction_index() const { return instruction_index_; }
  protected:
   LCodeGen* codegen() const { return codegen_; }
@@ -400,7 +396,6 @@ class LDeferredCode: public ZoneObject {
   Label entry_;
   Label exit_;
   Label* external_exit_;
-  int instruction_index_;
 };
} } // namespace v8::internal } } // namespace v8::internal
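
Aside: the header change above trims LDeferredCode back to its pre-3.6.6 shape. Every deferred stub still captures its instruction and emits an out-of-line slow path from Generate(); it just no longer exposes instr() or an instruction index for annotating the generated code. Below is a reduced stand-alone model of the pattern; names and types are simplified stand-ins, not V8's.

    #include <cstdio>
    #include <memory>
    #include <vector>

    struct DeferredCode {
      virtual ~DeferredCode() = default;
      virtual void Generate() = 0;                 // emit the slow path
    };

    struct DeferredNumberTagI : DeferredCode {
      explicit DeferredNumberTagI(int instr_id) : instr_id_(instr_id) {}
      void Generate() override {
        std::printf("slow path for instruction %d\n", instr_id_);
      }
     private:
      int instr_id_;
    };

    int main() {
      std::vector<std::unique_ptr<DeferredCode>> deferred;
      deferred.push_back(std::make_unique<DeferredNumberTagI>(7));
      for (auto& code : deferred) code->Generate();  // emitted after the main body
    }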

566
deps/v8/src/arm/macro-assembler-arm.cc

@ -42,8 +42,7 @@ namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size), : Assembler(arg_isolate, buffer, size),
generating_stub_(false), generating_stub_(false),
allow_stub_calls_(true), allow_stub_calls_(true) {
has_frame_(false) {
if (isolate() != NULL) { if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate()); isolate());
@ -407,6 +406,32 @@ void MacroAssembler::StoreRoot(Register source,
} }
void MacroAssembler::RecordWriteHelper(Register object,
Register address,
Register scratch) {
if (emit_debug_code()) {
// Check that the object is not in new space.
Label not_in_new_space;
InNewSpace(object, scratch, ne, &not_in_new_space);
Abort("new-space object passed to RecordWriteHelper");
bind(&not_in_new_space);
}
// Calculate page address.
Bfc(object, 0, kPageSizeBits);
// Calculate region number.
Ubfx(address, address, Page::kRegionSizeLog2,
kPageSizeBits - Page::kRegionSizeLog2);
// Mark region dirty.
ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
orr(scratch, scratch, Operand(ip, LSL, address));
str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
void MacroAssembler::InNewSpace(Register object, void MacroAssembler::InNewSpace(Register object,
Register scratch, Register scratch,
Condition cond, Condition cond,
@ -418,52 +443,38 @@ void MacroAssembler::InNewSpace(Register object,
} }
void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    LinkRegisterStatus lr_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check) {
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;

  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }

  // Although the object register is tagged, the offset is relative to the start
  // of the object, so so offset must be a multiple of kPointerSize.
  ASSERT(IsAligned(offset, kPointerSize));

  add(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
    b(eq, &ok);
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }

  RecordWrite(object,
              dst,
              value,
              lr_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK);

  bind(&done);

  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
    mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
  }
}


// Will clobber 4 registers: object, offset, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Operand offset,
                                 Register scratch0,
                                 Register scratch1) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));

  Label done;

  // First, test that the object is not in the new space. We cannot set
  // region marks for new space pages.
  InNewSpace(object, scratch0, eq, &done);

  // Add offset into the object.
  add(scratch0, object, offset);

  // Record the actual write.
  RecordWriteHelper(object, scratch0, scratch1);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(object, Operand(BitCast<int32_t>(kZapValue)));
    mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
    mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
  }
}
@ -473,94 +484,29 @@ void MacroAssembler::RecordWriteField(
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register value,
                                 LinkRegisterStatus lr_status,
                                 SaveFPRegsMode fp_mode,
                                 RememberedSetAction remembered_set_action,
                                 SmiCheck smi_check) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!address.is(cp) && !value.is(cp));

  Label done;

  if (smi_check == INLINE_SMI_CHECK) {
    ASSERT_EQ(0, kSmiTag);
    tst(value, Operand(kSmiTagMask));
    b(eq, &done);
  }

  CheckPageFlag(value,
                value,  // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);
  CheckPageFlag(object,
                value,  // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);

  // Record the actual write.
  if (lr_status == kLRHasNotBeenSaved) {
    push(lr);
  }
  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
  CallStub(&stub);
  if (lr_status == kLRHasNotBeenSaved) {
    pop(lr);
  }

  bind(&done);

  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
    mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
  }
}


// tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
                                 Register address,
                                 Register scratch) {
  // The compiled code assumes that record write doesn't change the
  // context register, so we check that none of the clobbered
  // registers are cp.
  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));

  Label done;

  // First, test that the object is not in the new space. We cannot set
  // region marks for new space pages.
  InNewSpace(object, scratch, eq, &done);

  // Record the actual write.
  RecordWriteHelper(object, address, scratch);

  bind(&done);

  // Clobber all input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    mov(object, Operand(BitCast<int32_t>(kZapValue)));
    mov(address, Operand(BitCast<int32_t>(kZapValue)));
    mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
  }
}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address,
Register scratch,
SaveFPRegsMode fp_mode,
RememberedSetFinalAction and_then) {
Label done;
if (FLAG_debug_code) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok);
stop("Remembered set pointer is in new space");
bind(&ok);
}
// Load store buffer top.
ExternalReference store_buffer =
ExternalReference::store_buffer_top(isolate());
mov(ip, Operand(store_buffer));
ldr(scratch, MemOperand(ip));
// Store pointer to buffer and increment buffer top.
str(address, MemOperand(scratch, kPointerSize, PostIndex));
// Write back new top of buffer.
str(scratch, MemOperand(ip));
// Call stub on end of buffer.
// Check for end of buffer.
tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
if (and_then == kFallThroughAtEnd) {
b(eq, &done);
} else {
ASSERT(and_then == kReturnAtEnd);
Ret(ne);
}
push(lr);
StoreBufferOverflowStub store_buffer_overflow =
StoreBufferOverflowStub(fp_mode);
CallStub(&store_buffer_overflow);
pop(lr);
bind(&done);
if (and_then == kReturnAtEnd) {
Ret();
} }
} }
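
RememberedSetHelper, deleted above, belongs to the 3.6.6 store-buffer write barrier that this revert removes: it appends the written slot's address at the store-buffer top, bumps the top pointer, and hands off to an overflow stub when a flag bit in the new top is set. A rough standalone C++ sketch of that bookkeeping follows; the class, the capacity, and the overflow behaviour are illustrative assumptions, not V8's actual store buffer.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Toy remembered set: slot addresses are appended at the current top; when a
// capacity threshold is hit, a slow path runs (the real code tail-calls a
// StoreBufferOverflowStub).
class StoreBuffer {
 public:
  void Record(uintptr_t slot_address) {
    // str(address, MemOperand(scratch, kPointerSize, PostIndex));
    // str(scratch, MemOperand(ip));  -- store the slot and bump the top.
    slots_.push_back(slot_address);
    // tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
    if (slots_.size() >= kCapacity) DrainOverflow();
  }

 private:
  void DrainOverflow() {
    printf("store buffer overflow after %zu entries\n", slots_.size());
    slots_.clear();  // stand-in for the real overflow stub
  }

  static const std::size_t kCapacity = 1024;  // assumed size
  std::vector<uintptr_t> slots_;
};

int main() {
  StoreBuffer buffer;
  int slot = 0;
  buffer.Record(reinterpret_cast<uintptr_t>(&slot));
  return 0;
}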
@ -1015,9 +961,6 @@ void MacroAssembler::InvokeCode(Register code,
InvokeFlag flag, InvokeFlag flag,
const CallWrapper& call_wrapper, const CallWrapper& call_wrapper,
CallKind call_kind) { CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done; Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag, InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
@ -1045,9 +988,6 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
RelocInfo::Mode rmode, RelocInfo::Mode rmode,
InvokeFlag flag, InvokeFlag flag,
CallKind call_kind) { CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done; Label done;
InvokePrologue(expected, actual, code, no_reg, &done, flag, InvokePrologue(expected, actual, code, no_reg, &done, flag,
@ -1071,9 +1011,6 @@ void MacroAssembler::InvokeFunction(Register fun,
InvokeFlag flag, InvokeFlag flag,
const CallWrapper& call_wrapper, const CallWrapper& call_wrapper,
CallKind call_kind) { CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in r1. // Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1)); ASSERT(fun.is(r1));
@ -1098,9 +1035,6 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual, const ParameterCount& actual,
InvokeFlag flag, InvokeFlag flag,
CallKind call_kind) { CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
ASSERT(function->is_compiled()); ASSERT(function->is_compiled());
// Get the function and setup the context. // Get the function and setup the context.
@ -1156,10 +1090,10 @@ void MacroAssembler::IsObjectJSStringType(Register object,
#ifdef ENABLE_DEBUGGER_SUPPORT #ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() { void MacroAssembler::DebugBreak() {
ASSERT(allow_stub_calls());
mov(r0, Operand(0, RelocInfo::NONE)); mov(r0, Operand(0, RelocInfo::NONE));
mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1); CEntryStub ces(1);
ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
} }
#endif #endif
@ -1859,127 +1793,13 @@ void MacroAssembler::CompareRoot(Register obj,
void MacroAssembler::CheckFastElements(Register map, void MacroAssembler::CheckFastElements(Register map,
Register scratch, Register scratch,
Label* fail) { Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); STATIC_ASSERT(FAST_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail); b(hi, fail);
} }
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
b(ls, fail);
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
void MacroAssembler::CheckFastSmiOnlyElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
b(hi, fail);
}
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* fail) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
// Handle smi values specially.
JumpIfSmi(value_reg, &smi_value);
// Ensure that the object is a heap number
CheckMap(value_reg,
scratch1,
isolate()->factory()->heap_number_map(),
fail,
DONT_DO_SMI_CHECK);
// Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
// in the exponent.
mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
cmp(exponent_reg, scratch1);
b(ge, &maybe_nan);
ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
bind(&have_double_value);
add(scratch1, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
str(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
bind(&maybe_nan);
// Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
// it's an Infinity, and the non-NaN code path applies.
b(gt, &is_nan);
ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
cmp(mantissa_reg, Operand(0));
b(eq, &have_double_value);
bind(&is_nan);
// Load canonical NaN for storing into the double array.
uint64_t nan_int64 = BitCast<uint64_t>(
FixedDoubleArray::canonical_not_the_hole_nan_as_double());
mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
jmp(&have_double_value);
bind(&smi_value);
add(scratch1, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
add(scratch1, scratch1,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
// scratch1 is now effective address of the double element
FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(VFP3)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
Register untagged_value = receiver_reg;
SmiUntag(untagged_value, value_reg);
FloatingPointHelper::ConvertIntToDouble(this,
untagged_value,
destination,
d0,
mantissa_reg,
exponent_reg,
scratch4,
s2);
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
vstr(d0, scratch1, 0);
} else {
str(mantissa_reg, MemOperand(scratch1, 0));
str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
}
bind(&done);
}
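
StoreNumberToDoubleElements, removed here along with the smi-only elements kinds, screens heap numbers before writing them into a FixedDoubleArray: an exponent word at or above 0x7ff00000 signals Infinity or NaN, and NaNs are replaced by a single canonical bit pattern. A standalone C++ sketch of that screening on the raw IEEE-754 words is below; the canonical pattern is an assumption, and the ARM code's signed-compare shortcut is replaced by an explicit exponent mask.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

// Splits a double into the two 32-bit words the stub stores into a
// FixedDoubleArray (sign/exponent word and mantissa word).
void DoubleWords(double value, uint32_t* upper, uint32_t* lower) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  *upper = static_cast<uint32_t>(bits >> 32);
  *lower = static_cast<uint32_t>(bits);
}

// Simplified screening: an all-ones exponent field means Infinity (zero
// fraction) or NaN (non-zero fraction); NaNs are replaced by one fixed
// "canonical" pattern so the array never ends up holding a surprising NaN.
uint64_t CanonicalizeForDoubleArray(double value) {
  uint32_t upper, lower;
  DoubleWords(value, &upper, &lower);
  const bool exponent_all_ones = (upper & 0x7ff00000u) == 0x7ff00000u;
  const bool fraction_nonzero = (upper & 0x000fffffu) != 0 || lower != 0;
  if (exponent_all_ones && fraction_nonzero) return 0x7ff8000000000000ull;
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  printf("NaN -> %016llx\n",
         static_cast<unsigned long long>(CanonicalizeForDoubleArray(nan)));
  printf("1.5 -> %016llx\n",
         static_cast<unsigned long long>(CanonicalizeForDoubleArray(1.5)));
  return 0;
}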
void MacroAssembler::CheckMap(Register obj, void MacroAssembler::CheckMap(Register obj,
Register scratch, Register scratch,
Handle<Map> map, Handle<Map> map,
@ -2075,13 +1895,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) { void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond); Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
} }
MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) { MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Object* result; Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode(); { MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result; if (!maybe_result->ToObject(&result)) return maybe_result;
@ -2093,12 +1913,13 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
} }
MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) { MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Object* result; Object* result;
{ MaybeObject* maybe_result = stub->TryGetCode(); { MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result; if (!maybe_result->ToObject(&result)) return maybe_result;
@ -2201,12 +2022,6 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
} }
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
}
void MacroAssembler::IllegalOperation(int num_arguments) { void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) { if (num_arguments > 0) {
add(sp, sp, Operand(num_arguments * kPointerSize)); add(sp, sp, Operand(num_arguments * kPointerSize));
@ -2602,7 +2417,8 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id); const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs)); mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate()))); mov(r1, Operand(ExternalReference(function, isolate())));
CEntryStub stub(1, kSaveFPRegs); CEntryStub stub(1);
stub.SaveDoubles();
CallStub(&stub); CallStub(&stub);
} }
@ -2675,9 +2491,6 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag, InvokeFlag flag,
const CallWrapper& call_wrapper) { const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
GetBuiltinEntry(r2, id); GetBuiltinEntry(r2, id);
if (flag == CALL_FUNCTION) { if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(r2)); call_wrapper.BeforeCall(CallSize(r2));
@ -2809,20 +2622,14 @@ void MacroAssembler::Abort(const char* msg) {
RecordComment(msg); RecordComment(msg);
} }
#endif #endif
// Disable stub call restrictions to always allow calls to abort.
AllowStubCallsScope allow_scope(this, true);
mov(r0, Operand(p0)); mov(r0, Operand(p0));
push(r0); push(r0);
mov(r0, Operand(Smi::FromInt(p1 - p0))); mov(r0, Operand(Smi::FromInt(p1 - p0)));
push(r0); push(r0);
// Disable stub call restrictions to always allow calls to abort. CallRuntime(Runtime::kAbort, 2);
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
CallRuntime(Runtime::kAbort, 2);
} else {
CallRuntime(Runtime::kAbort, 2);
}
// will not return here // will not return here
if (is_const_pool_blocked()) { if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of // If the calling code cares about the exact number of
@ -3123,19 +2930,6 @@ void MacroAssembler::CopyBytes(Register src,
} }
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
Register end_offset,
Register filler) {
Label loop, entry;
b(&entry);
bind(&loop);
str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
bind(&entry);
cmp(start_offset, end_offset);
b(lt, &loop);
}
void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
Register source, // Input. Register source, // Input.
Register scratch) { Register scratch) {
@ -3295,15 +3089,23 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
void MacroAssembler::CallCFunction(ExternalReference function, void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments, int num_reg_arguments,
int num_double_arguments) { int num_double_arguments) {
mov(ip, Operand(function)); CallCFunctionHelper(no_reg,
CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); function,
ip,
num_reg_arguments,
num_double_arguments);
} }
void MacroAssembler::CallCFunction(Register function, void MacroAssembler::CallCFunction(Register function,
int num_reg_arguments, Register scratch,
int num_double_arguments) { int num_reg_arguments,
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); int num_double_arguments) {
CallCFunctionHelper(function,
ExternalReference::the_hole_value_location(isolate()),
scratch,
num_reg_arguments,
num_double_arguments);
} }
@ -3314,15 +3116,17 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function, void MacroAssembler::CallCFunction(Register function,
Register scratch,
int num_arguments) { int num_arguments) {
CallCFunction(function, num_arguments, 0); CallCFunction(function, scratch, num_arguments, 0);
} }
void MacroAssembler::CallCFunctionHelper(Register function, void MacroAssembler::CallCFunctionHelper(Register function,
ExternalReference function_reference,
Register scratch,
int num_reg_arguments, int num_reg_arguments,
int num_double_arguments) { int num_double_arguments) {
ASSERT(has_frame());
// Make sure that the stack is aligned before calling a C function unless // Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which // running in the simulator. The simulator has its own alignment check which
// provides more information. // provides more information.
@ -3346,6 +3150,10 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// Just call directly. The function called cannot cause a GC, or // Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register // allow preemption, so the return address in the link register
// stays correct. // stays correct.
if (function.is(no_reg)) {
mov(scratch, Operand(function_reference));
function = scratch;
}
Call(function); Call(function);
int stack_passed_arguments = CalculateStackPassedWords( int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments); num_reg_arguments, num_double_arguments);
@ -3377,185 +3185,6 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
} }
void MacroAssembler::CheckPageFlag(
Register object,
Register scratch,
int mask,
Condition cc,
Label* condition_met) {
and_(scratch, object, Operand(~Page::kPageAlignmentMask));
ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
tst(scratch, Operand(mask));
b(cc, condition_met);
}
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
}
void MacroAssembler::HasColor(Register object,
Register bitmap_scratch,
Register mask_scratch,
Label* has_color,
int first_bit,
int second_bit) {
ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
GetMarkBits(object, bitmap_scratch, mask_scratch);
Label other_color, word_boundary;
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
tst(ip, Operand(mask_scratch));
b(first_bit == 1 ? eq : ne, &other_color);
// Shift left 1 by adding.
add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
b(eq, &word_boundary);
tst(ip, Operand(mask_scratch));
b(second_bit == 1 ? ne : eq, has_color);
jmp(&other_color);
bind(&word_boundary);
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
tst(ip, Operand(1));
b(second_bit == 1 ? ne : eq, has_color);
bind(&other_color);
}
// Detect some, but not all, common pointer-free objects. This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(Register value,
Register scratch,
Label* not_data_object) {
Label is_data_object;
ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
b(eq, &is_data_object);
ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
b(ne, not_data_object);
bind(&is_data_object);
}
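
JumpIfDataObject leans on the instance-type bit layout the ASSERTs spell out: non-strings have the 0x80 bit set and cons/sliced ("indirect") strings have the 0x1 bit set, so one mask test answers "is this a string with no GC pointers?". A small C++ sketch of the same test; the two example instance-type values are made up for illustration and are not real codes from objects.h.

#include <cstdint>
#include <cstdio>

// Bit layout taken from the ASSERTs above.
const uint8_t kIsNotStringMask = 0x80;
const uint8_t kIsIndirectStringMask = 0x01;

// One AND answers both questions at once, mirroring the single tst/b(ne).
bool IsDirectStringType(uint8_t instance_type) {
  return (instance_type & (kIsNotStringMask | kIsIndirectStringMask)) == 0;
}

int main() {
  printf("%d\n", IsDirectStringType(0x04));  // some sequential string: true
  printf("%d\n", IsDirectStringType(0x81));  // a non-string object: false
  return 0;
}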
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
mov(ip, Operand(1));
mov(mask_reg, Operand(ip, LSL, mask_reg));
}
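
GetMarkBits, part of the incremental-marking support being dropped, maps an object address to a position in its page's mark bitmap: the low address bits pick a bit within a bitmap cell, the next bits pick the cell. A standalone sketch of the index math; the cell width and page size used here are assumed values for illustration.

#include <cstdint>
#include <cstdio>

// Assumed parameters: 4-byte pointers (ARM), 32 mark bits per bitmap cell,
// 1 MB pages in the 3.6.x heap layout.
const int kPointerSizeLog2 = 2;
const int kBitsPerCellLog2 = 5;
const int kPageSizeBits = 20;

// Mirrors the two Ubfx extractions in GetMarkBits: which bitmap cell holds
// the mark bits for |addr|, and the mask selecting the first of its two bits.
void GetMarkBits(uintptr_t addr, uint32_t* cell_index, uint32_t* mask) {
  uint32_t bit = (addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
  *mask = 1u << bit;
  const int kLowBits = kPointerSizeLog2 + kBitsPerCellLog2;
  *cell_index = (addr >> kLowBits) & ((1u << (kPageSizeBits - kLowBits)) - 1);
}

int main() {
  uint32_t cell = 0, mask = 0;
  GetMarkBits(0x40000120, &cell, &mask);
  printf("cell %u, bit mask 0x%08x\n", cell, mask);  // cell 2, mask 0x00000100
  return 0;
}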
void MacroAssembler::EnsureNotWhite(
Register value,
Register bitmap_scratch,
Register mask_scratch,
Register load_scratch,
Label* value_is_white_and_not_data) {
ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
Label done;
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
tst(mask_scratch, load_scratch);
b(ne, &done);
if (FLAG_debug_code) {
// Check for impossible bit pattern.
Label ok;
// LSL may overflow, making the check conservative.
tst(load_scratch, Operand(mask_scratch, LSL, 1));
b(eq, &ok);
stop("Impossible marking bit pattern");
bind(&ok);
}
// Value is white. We check whether it is data that doesn't need scanning.
// Currently only checks for HeapNumber and non-cons strings.
Register map = load_scratch; // Holds map while checking type.
Register length = load_scratch; // Holds length of object after testing type.
Label is_data_object;
// Check for heap-number
ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
CompareRoot(map, Heap::kHeapNumberMapRootIndex);
mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
b(eq, &is_data_object);
// Check for strings.
ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
Register instance_type = load_scratch;
ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
b(ne, value_is_white_and_not_data);
// It's a non-indirect (non-cons and non-slice) string.
// If it's external, the length is just ExternalString::kSize.
// Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
// External strings are the only ones with the kExternalStringTag bit
// set.
ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
tst(instance_type, Operand(kExternalStringTag));
mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
b(ne, &is_data_object);
// Sequential string, either ASCII or UC16.
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
ldr(ip, FieldMemOperand(value, String::kLengthOffset));
tst(instance_type, Operand(kStringEncodingMask));
mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
and_(length, length, Operand(~kObjectAlignmentMask));
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
orr(ip, ip, Operand(mask_scratch));
str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
add(ip, ip, Operand(length));
str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
bind(&done);
}
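
EnsureNotWhite's fast path depends on the two-bit colour encoding asserted above: white is "00", black "10", grey "11", and "01" never occurs. Since black and grey both have the first bit set, "already marked" is a one-bit test, and a white data object is blackened by setting that single bit. A tiny C++ illustration of the encoding (the struct is a stand-in for two adjacent bitmap bits, not V8's layout):

#include <cstdio>

struct MarkBits {
  unsigned first : 1;   // set for black and grey
  unsigned second : 1;  // set for grey only
};

bool IsWhite(MarkBits m) { return m.first == 0; }

// EnsureNotWhite's fast exit: orr in the mask bit to turn white into black.
void MarkBlack(MarkBits* m) { m->first = 1; }

int main() {
  MarkBits object = {0, 0};  // freshly allocated: white
  printf("white before: %d\n", IsWhite(object));
  MarkBlack(&object);
  printf("white after:  %d\n", IsWhite(object));
  return 0;
}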
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
Usat(output_reg, 8, Operand(input_reg)); Usat(output_reg, 8, Operand(input_reg));
} }
@ -3605,17 +3234,6 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
} }
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
if (r1.is(r4)) return true;
if (r2.is(r3)) return true;
if (r2.is(r4)) return true;
if (r3.is(r4)) return true;
return false;
}
CodePatcher::CodePatcher(byte* address, int instructions) CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address), : address_(address),
instructions_(instructions), instructions_(instructions),

226
deps/v8/src/arm/macro-assembler-arm.h

@ -29,7 +29,6 @@
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_ #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "assembler.h" #include "assembler.h"
#include "frames.h"
#include "v8globals.h" #include "v8globals.h"
namespace v8 { namespace v8 {
@ -80,14 +79,6 @@ enum ObjectToDoubleFlags {
}; };
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
// MacroAssembler implements a collection of frequently used macros. // MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler { class MacroAssembler: public Assembler {
public: public:
@ -166,126 +157,40 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index, Heap::RootListIndex index,
Condition cond = al); Condition cond = al);
// ---------------------------------------------------------------------------
// GC Support
void IncrementalMarkingRecordWriteHelper(Register object,
Register value,
Register address);
enum RememberedSetFinalAction {
kReturnAtEnd,
kFallThroughAtEnd
};
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr,
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
void CheckPageFlag(Register object,
Register scratch,
int mask,
Condition cc,
Label* condition_met);
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
Register scratch,
Label* branch) {
InNewSpace(object, scratch, ne, branch);
}
// Check if object is in new space. Jumps if the object is in new space. // Check if object is in new space.
// The register scratch can be object itself, but it will be clobbered. // scratch can be object itself, but it will be clobbered.
void JumpIfInNewSpace(Register object, void InNewSpace(Register object,
Register scratch, Register scratch,
Label* branch) { Condition cond, // eq for new space, ne otherwise
InNewSpace(object, scratch, eq, branch); Label* branch);
}
// Check if an object has a given incremental marking color.
void HasColor(Register object,
Register scratch0,
Register scratch1,
Label* has_color,
int first_bit,
int second_bit);
void JumpIfBlack(Register object, // For the page containing |object| mark the region covering [address]
Register scratch0, // dirty. The object address must be in the first 8K of an allocated page.
Register scratch1, void RecordWriteHelper(Register object,
Label* on_black); Register address,
Register scratch);
// Checks the color of an object. If the object is already grey or black
// then we just fall through, since it is already live. If it is white and
// we can determine that it doesn't need to be scanned, then we just mark it
// black and fall through. For the rest we jump to the label so the
// incremental marker can fix its assumptions.
void EnsureNotWhite(Register object,
Register scratch1,
Register scratch2,
Register scratch3,
Label* object_is_white_and_not_data);
// Detects conservatively whether an object is data-only, ie it does need to // For the page containing |object| mark the region covering
// be scanned by the garbage collector. // [object+offset] dirty. The object address must be in the first 8K
void JumpIfDataObject(Register value, // of an allocated page. The 'scratch' registers are used in the
Register scratch, // implementation and all 3 registers are clobbered by the
Label* not_data_object); // operation, as well as the ip register. RecordWrite updates the
// write barrier even when storing smis.
// Notify the garbage collector that we wrote a pointer into an object. void RecordWrite(Register object,
// |object| is the object being stored into, |value| is the object being Operand offset,
// stored. value and scratch registers are clobbered by the operation. Register scratch0,
// The offset is the offset from the start of the object, not the offset from Register scratch1);
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
void RecordWriteField(
Register object,
int offset,
Register value,
Register scratch,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
// As above, but the offset has the tag presubtracted. For use with
// MemOperand(reg, off).
inline void RecordWriteContextSlot(
Register context,
int offset,
Register value,
Register scratch,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK) {
RecordWriteField(context,
offset + kHeapObjectTag,
value,
scratch,
lr_status,
save_fp,
remembered_set_action,
smi_check);
}
// For a given |object| notify the garbage collector that the slot |address| // For the page containing |object| mark the region covering
// has been written. |value| is the object being stored. The value and // [address] dirty. The object address must be in the first 8K of an
// address registers are clobbered by the operation. // allocated page. All 3 registers are clobbered by the operation,
void RecordWrite( // as well as the ip register. RecordWrite updates the write barrier
Register object, // even when storing smis.
Register address, void RecordWrite(Register object,
Register value, Register address,
LinkRegisterStatus lr_status, Register scratch);
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
// Push a handle. // Push a handle.
void Push(Handle<Object> handle); void Push(Handle<Object> handle);
@ -413,6 +318,16 @@ class MacroAssembler: public Assembler {
const double imm, const double imm,
const Condition cond = al); const Condition cond = al);
// ---------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter exit frame. // Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C. // stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0); void EnterExitFrame(bool save_doubles, int stack_space = 0);
@ -654,13 +569,6 @@ class MacroAssembler: public Assembler {
Register length, Register length,
Register scratch); Register scratch);
// Initialize fields with filler values. Fields starting at |start_offset|
// not including end_offset are overwritten with the value in |filler|. At
// the end the loop, |start_offset| takes the value of |end_offset|.
void InitializeFieldsWithFiller(Register start_offset,
Register end_offset,
Register filler);
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Support functions. // Support functions.
@ -700,31 +608,6 @@ class MacroAssembler: public Assembler {
Register scratch, Register scratch,
Label* fail); Label* fail);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,
Register scratch,
Label* fail);
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiOnlyElements(Register map,
Register scratch,
Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements, otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* fail);
// Check if the map of an object is equal to a specified map (either // Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to // given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known // label if not. Skip the smi check if not required (object is known
@ -947,11 +830,11 @@ class MacroAssembler: public Assembler {
// return address (unless this is somehow accounted for by the called // return address (unless this is somehow accounted for by the called
// function). // function).
void CallCFunction(ExternalReference function, int num_arguments); void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments); void CallCFunction(Register function, Register scratch, int num_arguments);
void CallCFunction(ExternalReference function, void CallCFunction(ExternalReference function,
int num_reg_arguments, int num_reg_arguments,
int num_double_arguments); int num_double_arguments);
void CallCFunction(Register function, void CallCFunction(Register function, Register scratch,
int num_reg_arguments, int num_reg_arguments,
int num_double_arguments); int num_double_arguments);
@ -1019,9 +902,6 @@ class MacroAssembler: public Assembler {
bool generating_stub() { return generating_stub_; } bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; } void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; } bool allow_stub_calls() { return allow_stub_calls_; }
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() { return has_frame_; }
inline bool AllowThisStubCall(CodeStub* stub);
// EABI variant for double arguments in use. // EABI variant for double arguments in use.
bool use_eabi_hardfloat() { bool use_eabi_hardfloat() {
@ -1168,12 +1048,10 @@ class MacroAssembler: public Assembler {
void LoadInstanceDescriptors(Register map, Register descriptors); void LoadInstanceDescriptors(Register map, Register descriptors);
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
private: private:
void CallCFunctionHelper(Register function, void CallCFunctionHelper(Register function,
ExternalReference function_reference,
Register scratch,
int num_reg_arguments, int num_reg_arguments,
int num_double_arguments); int num_double_arguments);
@ -1189,25 +1067,16 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper, const CallWrapper& call_wrapper,
CallKind call_kind); CallKind call_kind);
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
void InitializeNewString(Register string, void InitializeNewString(Register string,
Register length, Register length,
Heap::RootListIndex map_index, Heap::RootListIndex map_index,
Register scratch1, Register scratch1,
Register scratch2); Register scratch2);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Register scratch,
Condition cond, // eq for new space, ne otherwise.
Label* branch);
// Helper for finding the mark bits for an address. Afterwards, the
// bitmap register points at the word with the mark bits and the mask
// the position of the first bit. Leaves addr_reg unchanged.
inline void GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg);
// Compute memory operands for safepoint stack slots. // Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code); static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg); MemOperand SafepointRegisterSlot(Register reg);
@ -1215,7 +1084,6 @@ class MacroAssembler: public Assembler {
bool generating_stub_; bool generating_stub_;
bool allow_stub_calls_; bool allow_stub_calls_;
bool has_frame_;
// This handle will be patched with the code object on installation. // This handle will be patched with the code object on installation.
Handle<Object> code_object_; Handle<Object> code_object_;

15
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -371,12 +371,9 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// Isolate. // Isolate.
__ mov(r3, Operand(ExternalReference::isolate_address())); __ mov(r3, Operand(ExternalReference::isolate_address()));
{ ExternalReference function =
AllowExternalCallThatCantCauseGC scope(masm_); ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
ExternalReference function = __ CallCFunction(function, argument_count);
ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
__ CallCFunction(function, argument_count);
}
// Check if function returned non-zero for success or zero for failure. // Check if function returned non-zero for success or zero for failure.
__ cmp(r0, Operand(0, RelocInfo::NONE)); __ cmp(r0, Operand(0, RelocInfo::NONE));
@ -614,12 +611,6 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Entry code: // Entry code:
__ bind(&entry_label_); __ bind(&entry_label_);
// Tell the system that we have a stack frame. Because the type is MANUAL, no
// code is generated.
FrameScope scope(masm_, StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
// Push arguments // Push arguments
// Save callee-save registers. // Save callee-save registers.
// Start new stack frame. // Start new stack frame.

2
deps/v8/src/arm/simulator-arm.cc

@ -1618,8 +1618,6 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address); ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address); intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
// Catch null pointers a little earlier.
ASSERT(start_address > 8191 || start_address < 0);
int reg = 0; int reg = 0;
while (rlist != 0) { while (rlist != 0) {
if ((rlist & 1) != 0) { if ((rlist & 1) != 0) {

324
deps/v8/src/arm/stub-cache-arm.cc

@ -431,13 +431,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address. // Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register. // Pass the now unused name_reg as a scratch register.
__ mov(name_reg, r0); __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
__ RecordWriteField(receiver_reg,
offset,
name_reg,
scratch,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
} else { } else {
// Write to the properties array. // Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize; int offset = index * kPointerSize + FixedArray::kHeaderSize;
@ -450,13 +444,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address. // Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return. // Ok to clobber receiver_reg and name_reg, since we return.
__ mov(name_reg, r0); __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
__ RecordWriteField(scratch,
offset,
name_reg,
receiver_reg,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
} }
// Return the value (register r0). // Return the value (register r0).
@ -565,10 +553,9 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
} }
static MaybeObject* GenerateFastApiDirectCall( static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
MacroAssembler* masm, const CallOptimization& optimization,
const CallOptimization& optimization, int argc) {
int argc) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- sp[0] : holder (set by CheckPrototypes) // -- sp[0] : holder (set by CheckPrototypes)
// -- sp[4] : callee js function // -- sp[4] : callee js function
@ -604,8 +591,6 @@ static MaybeObject* GenerateFastApiDirectCall(
ApiFunction fun(api_function_address); ApiFunction fun(api_function_address);
const int kApiStackSpace = 4; const int kApiStackSpace = 4;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace); __ EnterExitFrame(false, kApiStackSpace);
// r0 = v8::Arguments& // r0 = v8::Arguments&
@ -631,11 +616,9 @@ static MaybeObject* GenerateFastApiDirectCall(
ExternalReference ref = ExternalReference(&fun, ExternalReference ref = ExternalReference(&fun,
ExternalReference::DIRECT_API_CALL, ExternalReference::DIRECT_API_CALL,
masm->isolate()); masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace); return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
} }
class CallInterceptorCompiler BASE_EMBEDDED { class CallInterceptorCompiler BASE_EMBEDDED {
public: public:
CallInterceptorCompiler(StubCompiler* stub_compiler, CallInterceptorCompiler(StubCompiler* stub_compiler,
@ -811,7 +794,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
miss_label); miss_label);
// Call a runtime function to load the interceptor property. // Call a runtime function to load the interceptor property.
FrameScope scope(masm, StackFrame::INTERNAL); __ EnterInternalFrame();
// Save the name_ register across the call. // Save the name_ register across the call.
__ push(name_); __ push(name_);
@ -828,8 +811,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Restore the name_ register. // Restore the name_ register.
__ pop(name_); __ pop(name_);
__ LeaveInternalFrame();
// Leave the internal frame.
} }
void LoadWithInterceptor(MacroAssembler* masm, void LoadWithInterceptor(MacroAssembler* masm,
@ -838,19 +820,18 @@ class CallInterceptorCompiler BASE_EMBEDDED {
JSObject* holder_obj, JSObject* holder_obj,
Register scratch, Register scratch,
Label* interceptor_succeeded) { Label* interceptor_succeeded) {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(holder, name_);

      CompileCallLoadPropertyWithInterceptor(masm,
                                             receiver,
                                             holder,
                                             name_,
                                             holder_obj);

      __ pop(name_);  // Restore the name.
      __ pop(receiver);  // Restore the holder.
    }

    __ EnterInternalFrame();
    __ Push(holder, name_);

    CompileCallLoadPropertyWithInterceptor(masm,
                                           receiver,
                                           holder,
                                           name_,
                                           holder_obj);

    __ pop(name_);  // Restore the name.
    __ pop(receiver);  // Restore the holder.
    __ LeaveInternalFrame();
// If interceptor returns no-result sentinel, call the constant function. // If interceptor returns no-result sentinel, call the constant function.
__ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex); __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
@ -1247,10 +1228,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
ApiFunction fun(getter_address); ApiFunction fun(getter_address);
const int kApiStackSpace = 1; const int kApiStackSpace = 1;
FrameScope frame_scope(masm(), StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace); __ EnterExitFrame(false, kApiStackSpace);
// Create AccessorInfo instance on the stack above the exit frame with // Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object **args_) as the data. // scratch2 (internal::Object **args_) as the data.
__ str(scratch2, MemOperand(sp, 1 * kPointerSize)); __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
@ -1310,44 +1288,42 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
// Save necessary data before invoking an interceptor. // Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers. // Requires a frame to make GC aware of pushed pointers.
{ __ EnterInternalFrame();
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
// CALLBACKS case needs a receiver to be passed into C++ callback. // CALLBACKS case needs a receiver to be passed into C++ callback.
__ Push(receiver, holder_reg, name_reg); __ Push(receiver, holder_reg, name_reg);
} else { } else {
__ Push(holder_reg, name_reg); __ Push(holder_reg, name_reg);
} }
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
CompileCallLoadPropertyWithInterceptor(masm(),
receiver,
holder_reg,
name_reg,
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
__ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
__ cmp(r0, scratch1);
__ b(eq, &interceptor_failed);
frame_scope.GenerateLeaveFrame();
__ Ret();
__ bind(&interceptor_failed); // Invoke an interceptor. Note: map checks from receiver to
__ pop(name_reg); // interceptor's holder has been compiled before (see a caller
__ pop(holder_reg); // of this method.)
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { CompileCallLoadPropertyWithInterceptor(masm(),
__ pop(receiver); receiver,
} holder_reg,
name_reg,
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
__ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
__ cmp(r0, scratch1);
__ b(eq, &interceptor_failed);
__ LeaveInternalFrame();
__ Ret();
// Leave the internal frame. __ bind(&interceptor_failed);
__ pop(name_reg);
__ pop(holder_reg);
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
__ pop(receiver);
} }
__ LeaveInternalFrame();
// Check that the maps from interceptor's holder to lookup's holder // Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register. // haven't changed. And load lookup's holder into |holder| register.
if (interceptor_holder != lookup->holder()) { if (interceptor_holder != lookup->holder()) {
@ -1580,7 +1556,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
DONT_DO_SMI_CHECK); DONT_DO_SMI_CHECK);
if (argc == 1) { // Otherwise fall through to call the builtin. if (argc == 1) { // Otherwise fall through to call the builtin.
Label attempt_to_grow_elements; Label exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into r0 and calculate new length. // Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@ -1595,15 +1571,11 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ cmp(r0, r4); __ cmp(r0, r4);
__ b(gt, &attempt_to_grow_elements); __ b(gt, &attempt_to_grow_elements);
// Check if value is a smi.
Label with_write_barrier;
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(r4, &with_write_barrier);
// Save new length. // Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Push the element. // Push the element.
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
// We may need a register containing the address end_elements below, // We may need a register containing the address end_elements below,
// so write back the value in end_elements. // so write back the value in end_elements.
__ add(end_elements, elements, __ add(end_elements, elements,
@ -1613,31 +1585,14 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
// Check for a smi. // Check for a smi.
__ JumpIfNotSmi(r4, &with_write_barrier);
__ bind(&exit);
__ Drop(argc + 1); __ Drop(argc + 1);
__ Ret(); __ Ret();
__ bind(&with_write_barrier); __ bind(&with_write_barrier);
__ InNewSpace(elements, r4, eq, &exit);
__ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset)); __ RecordWriteHelper(elements, end_elements, r4);
__ CheckFastSmiOnlyElements(r6, r6, &call_builtin);
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Push the element.
// We may need a register containing the address end_elements below,
// so write back the value in end_elements.
__ add(end_elements, elements,
Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
__ RecordWrite(elements,
end_elements,
r4,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Drop(argc + 1); __ Drop(argc + 1);
__ Ret(); __ Ret();
@ -1649,15 +1604,6 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ b(&call_builtin); __ b(&call_builtin);
} }
__ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
// Growing elements that are SMI-only requires special handling in case
// the new element is non-Smi. For now, delegate to the builtin.
Label no_fast_elements_check;
__ JumpIfSmi(r2, &no_fast_elements_check);
__ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ CheckFastObjectElements(r7, r7, &call_builtin);
__ bind(&no_fast_elements_check);
Isolate* isolate = masm()->isolate(); Isolate* isolate = masm()->isolate();
ExternalReference new_space_allocation_top = ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate); ExternalReference::new_space_allocation_top_address(isolate);
@ -1684,7 +1630,8 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
// Update new_space_allocation_top. // Update new_space_allocation_top.
__ str(r6, MemOperand(r7)); __ str(r6, MemOperand(r7));
// Push the argument. // Push the argument.
__ str(r2, MemOperand(end_elements)); __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize));
__ str(r6, MemOperand(end_elements));
// Fill the rest with holes. // Fill the rest with holes.
__ LoadRoot(r6, Heap::kTheHoleValueRootIndex); __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
for (int i = 1; i < kAllocationDelta; i++) { for (int i = 1; i < kAllocationDelta; i++) {
@ -2766,15 +2713,6 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Store the value in the cell. // Store the value in the cell.
__ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset)); __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
__ mov(r1, r0);
__ RecordWriteField(r4,
JSGlobalPropertyCell::kValueOffset,
r1,
r2,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET);
Counters* counters = masm()->isolate()->counters(); Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3); __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
__ Ret(); __ Ret();
@ -3516,7 +3454,6 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -3603,7 +3540,6 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
} }
break; break;
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -3944,7 +3880,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
} }
break; break;
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -4008,7 +3943,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -4148,7 +4082,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -4301,10 +4234,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
} }
void KeyedStoreStubCompiler::GenerateStoreFastElement(
    MacroAssembler* masm,
    bool is_js_array,
    ElementsKind elements_kind) {
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
                                                      bool is_js_array) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- r0 : value // -- r0 : value
// -- r1 : key // -- r1 : key
@ -4346,33 +4277,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ cmp(key_reg, scratch); __ cmp(key_reg, scratch);
__ b(hs, &miss_force_generic); __ b(hs, &miss_force_generic);
  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
    __ JumpIfNotSmi(value_reg, &miss_force_generic);
    __ add(scratch,
           elements_reg,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
    __ add(scratch,
           scratch,
           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
    __ str(value_reg, MemOperand(scratch));
  } else {
    ASSERT(elements_kind == FAST_ELEMENTS);
    __ add(scratch,
           elements_reg,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
    __ add(scratch,
           scratch,
           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
    __ str(value_reg, MemOperand(scratch));
    __ mov(receiver_reg, value_reg);
    __ RecordWrite(elements_reg,  // Object.
                   scratch,       // Address.
                   receiver_reg,  // Value.
                   kLRHasNotBeenSaved,
                   kDontSaveFPRegs);
  }

  __ add(scratch,
         elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ str(value_reg,
         MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ RecordWrite(scratch,
                 Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
                 receiver_reg , elements_reg);
// value_reg (r0) is preserved. // value_reg (r0) is preserved.
// Done. // Done.
__ Ret(); __ Ret();
@ -4396,15 +4309,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- r4 : scratch // -- r4 : scratch
// -- r5 : scratch // -- r5 : scratch
// ----------------------------------- // -----------------------------------
Label miss_force_generic; Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
Register value_reg = r0; Register value_reg = r0;
Register key_reg = r1; Register key_reg = r1;
Register receiver_reg = r2; Register receiver_reg = r2;
Register elements_reg = r3; Register scratch = r3;
Register scratch1 = r4; Register elements_reg = r4;
Register scratch2 = r5; Register mantissa_reg = r5;
Register scratch3 = r6; Register exponent_reg = r6;
Register scratch4 = r7; Register scratch4 = r7;
// This stub is meant to be tail-jumped to, the receiver must already // This stub is meant to be tail-jumped to, the receiver must already
@ -4416,25 +4329,90 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Check that the key is within bounds. // Check that the key is within bounds.
if (is_js_array) { if (is_js_array) {
__ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
} else { } else {
__ ldr(scratch1, __ ldr(scratch,
FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
} }
// Compare smis, unsigned compare catches both negative and out-of-bound // Compare smis, unsigned compare catches both negative and out-of-bound
// indexes. // indexes.
__ cmp(key_reg, scratch1); __ cmp(key_reg, scratch);
__ b(hs, &miss_force_generic); __ b(hs, &miss_force_generic);
__ StoreNumberToDoubleElements(value_reg, // Handle smi values specially.
key_reg, __ JumpIfSmi(value_reg, &smi_value);
receiver_reg,
elements_reg, // Ensure that the object is a heap number
scratch1, __ CheckMap(value_reg,
scratch2, scratch,
scratch3, masm->isolate()->factory()->heap_number_map(),
scratch4, &miss_force_generic,
&miss_force_generic); DONT_DO_SMI_CHECK);
// Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
// in the exponent.
__ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
__ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
__ cmp(exponent_reg, scratch);
__ b(ge, &maybe_nan);
__ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
__ bind(&have_double_value);
__ add(scratch, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
__ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
__ str(exponent_reg, FieldMemOperand(scratch, offset));
__ Ret();
__ bind(&maybe_nan);
// Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
// it's an Infinity, and the non-NaN code path applies.
__ b(gt, &is_nan);
__ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
__ cmp(mantissa_reg, Operand(0));
__ b(eq, &have_double_value);
__ bind(&is_nan);
// Load canonical NaN for storing into the double array.
uint64_t nan_int64 = BitCast<uint64_t>(
FixedDoubleArray::canonical_not_the_hole_nan_as_double());
__ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
__ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
__ jmp(&have_double_value);
__ bind(&smi_value);
__ add(scratch, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ add(scratch, scratch,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
// scratch is now effective address of the double element
FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(VFP3)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
Register untagged_value = receiver_reg;
__ SmiUntag(untagged_value, value_reg);
FloatingPointHelper::ConvertIntToDouble(
masm,
untagged_value,
destination,
d0,
mantissa_reg,
exponent_reg,
scratch4,
s2);
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
__ vstr(d0, scratch, 0);
} else {
__ str(mantissa_reg, MemOperand(scratch, 0));
__ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
}
__ Ret(); __ Ret();
// Handle store cache miss, replacing the ic with the generic stub. // Handle store cache miss, replacing the ic with the generic stub.
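The NaN handling above keys off the upper 32 bits of an IEEE-754 double, comparing them against kNaNOrInfinityLowerBoundUpper32 (0x7ff00000, per the comment in the hunk). A minimal standalone sketch of that check in plain JavaScript, assuming a little-endian host (illustrative only, not V8 code):

  var buf = new ArrayBuffer(8);
  var dbl = new Float64Array(buf);
  var words = new Uint32Array(buf);

  // Sign bit, 11 exponent bits and the top 20 mantissa bits of a double.
  function upperWord(x) {
    dbl[0] = x;
    return words[1];   // word 1 is the high half on little-endian hosts
  }

  upperWord(Infinity).toString(16);  // "7ff00000": exponent all ones, mantissa zero
  upperWord(NaN).toString(16);       // typically "7ff80000": exponent all ones, mantissa non-zero
  upperWord(1.5).toString(16);       // "3ff80000": an ordinary finite value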

151  deps/v8/src/array.js

@ -201,14 +201,17 @@ function ConvertToString(x) {
function ConvertToLocaleString(e) { function ConvertToLocaleString(e) {
if (IS_NULL_OR_UNDEFINED(e)) { if (e == null) {
return ''; return '';
} else { } else {
// According to ES5, seciton 15.4.4.3, the toLocaleString conversion // e_obj's toLocaleString might be overwritten, check if it is a function.
// must throw a TypeError if ToObject(e).toLocaleString isn't // Call ToString if toLocaleString is not a function.
// callable. // See issue 877615.
var e_obj = ToObject(e); var e_obj = ToObject(e);
return %ToString(e_obj.toLocaleString()); if (IS_SPEC_FUNCTION(e_obj.toLocaleString))
return ToString(e_obj.toLocaleString());
else
return ToString(e);
} }
} }
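The two sides of ConvertToLocaleString differ when an element's toLocaleString is not callable: the ES5 15.4.4.3 side throws, the other falls back to ToString(e). A plain-JS illustration with a hypothetical element (not V8 code):

  var odd = { toLocaleString: 42, toString: function () { return "odd"; } };

  // ES5 15.4.4.3 side: ToObject(e).toLocaleString() must be callable -> TypeError.
  // Fallback side: toLocaleString is not a function, so ToString(e) -> "odd".
  [odd].toLocaleString();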
@ -378,31 +381,18 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
function ArrayToString() { function ArrayToString() {
var array; if (!IS_ARRAY(this)) {
var func; throw new $TypeError('Array.prototype.toString is not generic');
if (IS_ARRAY(this)) {
func = this.join;
if (func === ArrayJoin) {
return Join(this, this.length, ',', ConvertToString);
}
array = this;
} else {
array = ToObject(this);
func = array.join;
} }
if (!IS_SPEC_FUNCTION(func)) { return Join(this, this.length, ',', ConvertToString);
return %_CallFunction(array, ObjectToString);
}
return %_CallFunction(array, func);
} }
function ArrayToLocaleString() { function ArrayToLocaleString() {
var array = ToObject(this); if (!IS_ARRAY(this)) {
var arrayLen = array.length; throw new $TypeError('Array.prototype.toString is not generic');
var len = TO_UINT32(arrayLen); }
if (len === 0) return ""; return Join(this, this.length, ',', ConvertToLocaleString);
return Join(array, len, ',', ConvertToLocaleString);
} }
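One side of ArrayToString stays generic, following ES5 15.4.4.2; the other throws for any non-array receiver. A small plain-JS illustration of the difference (not V8 code):

  var arrayLike = { length: 2, 0: "a", 1: "b" };

  // Generic (ES5 15.4.4.2) behaviour: use this.join when callable,
  // otherwise fall back to Object.prototype.toString.
  Array.prototype.toString.call(arrayLike);   // "[object Object]" (no callable join yet)
  arrayLike.join = Array.prototype.join;
  Array.prototype.toString.call(arrayLike);   // "a,b"

  // Throwing behaviour: any non-array receiver raises
  // "TypeError: Array.prototype.toString is not generic".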
@ -1003,24 +993,21 @@ function ArrayFilter(f, receiver) {
["Array.prototype.filter"]); ["Array.prototype.filter"]);
} }
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = ToUint32(array.length);
if (!IS_SPEC_FUNCTION(f)) { if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]); throw MakeTypeError('called_non_callable', [ f ]);
} }
if (IS_NULL_OR_UNDEFINED(receiver)) { if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver; receiver = %GetDefaultReceiver(f) || receiver;
} }
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = ToUint32(this.length);
var result = []; var result = [];
var result_length = 0; var result_length = 0;
for (var i = 0; i < length; i++) { for (var i = 0; i < length; i++) {
var current = array[i]; var current = this[i];
if (!IS_UNDEFINED(current) || i in array) { if (!IS_UNDEFINED(current) || i in this) {
if (%_CallFunction(receiver, current, i, array, f)) { if (%_CallFunction(receiver, current, i, this, f)) {
result[result_length++] = current; result[result_length++] = current;
} }
} }
@ -1035,22 +1022,19 @@ function ArrayForEach(f, receiver) {
["Array.prototype.forEach"]); ["Array.prototype.forEach"]);
} }
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) { if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]); throw MakeTypeError('called_non_callable', [ f ]);
} }
if (IS_NULL_OR_UNDEFINED(receiver)) { if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver; receiver = %GetDefaultReceiver(f) || receiver;
} }
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) { for (var i = 0; i < length; i++) {
var current = array[i]; var current = this[i];
if (!IS_UNDEFINED(current) || i in array) { if (!IS_UNDEFINED(current) || i in this) {
%_CallFunction(receiver, current, i, array, f); %_CallFunction(receiver, current, i, this, f);
} }
} }
} }
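Both versions of these iteration builtins snapshot the length before the loop (one of them additionally reads elements off ToObject(this)). Why the snapshot matters, as a plain-JS example:

  var seen = [];
  var a = [1, 2, 3];
  a.forEach(function (x) {
    a.push(x * 10);   // grows the array during iteration
    seen.push(x);
  });
  seen;       // [1, 2, 3] - elements appended inside the callback are never visited
  a.length;   // 6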
@ -1064,22 +1048,19 @@ function ArraySome(f, receiver) {
["Array.prototype.some"]); ["Array.prototype.some"]);
} }
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) { if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]); throw MakeTypeError('called_non_callable', [ f ]);
} }
if (IS_NULL_OR_UNDEFINED(receiver)) { if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver; receiver = %GetDefaultReceiver(f) || receiver;
} }
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) { for (var i = 0; i < length; i++) {
var current = array[i]; var current = this[i];
if (!IS_UNDEFINED(current) || i in array) { if (!IS_UNDEFINED(current) || i in this) {
if (%_CallFunction(receiver, current, i, array, f)) return true; if (%_CallFunction(receiver, current, i, this, f)) return true;
} }
} }
return false; return false;
@ -1092,22 +1073,19 @@ function ArrayEvery(f, receiver) {
["Array.prototype.every"]); ["Array.prototype.every"]);
} }
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) { if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]); throw MakeTypeError('called_non_callable', [ f ]);
} }
if (IS_NULL_OR_UNDEFINED(receiver)) { if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver; receiver = %GetDefaultReceiver(f) || receiver;
} }
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) { for (var i = 0; i < length; i++) {
var current = array[i]; var current = this[i];
if (!IS_UNDEFINED(current) || i in array) { if (!IS_UNDEFINED(current) || i in this) {
if (!%_CallFunction(receiver, current, i, array, f)) return false; if (!%_CallFunction(receiver, current, i, this, f)) return false;
} }
} }
return true; return true;
@ -1119,24 +1097,21 @@ function ArrayMap(f, receiver) {
["Array.prototype.map"]); ["Array.prototype.map"]);
} }
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = TO_UINT32(array.length);
if (!IS_SPEC_FUNCTION(f)) { if (!IS_SPEC_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]); throw MakeTypeError('called_non_callable', [ f ]);
} }
if (IS_NULL_OR_UNDEFINED(receiver)) { if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver; receiver = %GetDefaultReceiver(f) || receiver;
} }
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = TO_UINT32(this.length);
var result = new $Array(); var result = new $Array();
var accumulator = new InternalArray(length); var accumulator = new InternalArray(length);
for (var i = 0; i < length; i++) { for (var i = 0; i < length; i++) {
var current = array[i]; var current = this[i];
if (!IS_UNDEFINED(current) || i in array) { if (!IS_UNDEFINED(current) || i in this) {
accumulator[i] = %_CallFunction(receiver, current, i, array, f); accumulator[i] = %_CallFunction(receiver, current, i, this, f);
} }
} }
%MoveArrayContents(accumulator, result); %MoveArrayContents(accumulator, result);
@ -1270,20 +1245,19 @@ function ArrayReduce(callback, current) {
["Array.prototype.reduce"]); ["Array.prototype.reduce"]);
} }
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = ToObject(this);
var length = ToUint32(array.length);
if (!IS_SPEC_FUNCTION(callback)) { if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]); throw MakeTypeError('called_non_callable', [callback]);
} }
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = ToUint32(this.length);
var i = 0; var i = 0;
find_initial: if (%_ArgumentsLength() < 2) { find_initial: if (%_ArgumentsLength() < 2) {
for (; i < length; i++) { for (; i < length; i++) {
current = array[i]; current = this[i];
if (!IS_UNDEFINED(current) || i in array) { if (!IS_UNDEFINED(current) || i in this) {
i++; i++;
break find_initial; break find_initial;
} }
@ -1293,9 +1267,9 @@ function ArrayReduce(callback, current) {
var receiver = %GetDefaultReceiver(callback); var receiver = %GetDefaultReceiver(callback);
for (; i < length; i++) { for (; i < length; i++) {
var element = array[i]; var element = this[i];
if (!IS_UNDEFINED(element) || i in array) { if (!IS_UNDEFINED(element) || i in this) {
current = %_CallFunction(receiver, current, element, i, array, callback); current = %_CallFunction(receiver, current, element, i, this, callback);
} }
} }
return current; return current;
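The find_initial labelled block implements reduce's no-initial-value case: scan forward for the first present element, use it as the accumulator, and continue from the next index. The observable behaviour, in plain JS:

  // Holes at indexes 0 and 1; the first present element (5) becomes the accumulator.
  [ , , 5, 6 ].reduce(function (acc, x) { return acc + x; });        // 11

  // With an explicit initial value the scan is skipped entirely.
  [ , , 5, 6 ].reduce(function (acc, x) { return acc + x; }, 100);   // 111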
@ -1307,20 +1281,15 @@ function ArrayReduceRight(callback, current) {
["Array.prototype.reduceRight"]); ["Array.prototype.reduceRight"]);
} }
// Pull out the length so that side effects are visible before the
// callback function is checked.
var array = ToObject(this);
var length = ToUint32(array.length);
if (!IS_SPEC_FUNCTION(callback)) { if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]); throw MakeTypeError('called_non_callable', [callback]);
} }
var i = ToUint32(this.length) - 1;
var i = length - 1;
find_initial: if (%_ArgumentsLength() < 2) { find_initial: if (%_ArgumentsLength() < 2) {
for (; i >= 0; i--) { for (; i >= 0; i--) {
current = array[i]; current = this[i];
if (!IS_UNDEFINED(current) || i in array) { if (!IS_UNDEFINED(current) || i in this) {
i--; i--;
break find_initial; break find_initial;
} }
@ -1330,9 +1299,9 @@ function ArrayReduceRight(callback, current) {
var receiver = %GetDefaultReceiver(callback); var receiver = %GetDefaultReceiver(callback);
for (; i >= 0; i--) { for (; i >= 0; i--) {
var element = array[i]; var element = this[i];
if (!IS_UNDEFINED(element) || i in array) { if (!IS_UNDEFINED(element) || i in this) {
current = %_CallFunction(receiver, current, element, i, array, callback); current = %_CallFunction(receiver, current, element, i, this, callback);
} }
} }
return current; return current;

56  deps/v8/src/assembler.cc

@ -38,7 +38,6 @@
#include "deoptimizer.h" #include "deoptimizer.h"
#include "execution.h" #include "execution.h"
#include "ic-inl.h" #include "ic-inl.h"
#include "incremental-marking.h"
#include "factory.h" #include "factory.h"
#include "runtime.h" #include "runtime.h"
#include "runtime-profiler.h" #include "runtime-profiler.h"
@ -48,7 +47,6 @@
#include "ast.h" #include "ast.h"
#include "regexp-macro-assembler.h" #include "regexp-macro-assembler.h"
#include "platform.h" #include "platform.h"
#include "store-buffer.h"
// Include native regexp-macro-assembler. // Include native regexp-macro-assembler.
#ifndef V8_INTERPRETED_REGEXP #ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32 #if V8_TARGET_ARCH_IA32
@ -518,7 +516,6 @@ void RelocIterator::next() {
RelocIterator::RelocIterator(Code* code, int mode_mask) { RelocIterator::RelocIterator(Code* code, int mode_mask) {
rinfo_.host_ = code;
rinfo_.pc_ = code->instruction_start(); rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0; rinfo_.data_ = 0;
// Relocation info is read backwards. // Relocation info is read backwards.
@ -739,38 +736,9 @@ ExternalReference::ExternalReference(const SCTableReference& table_ref)
: address_(table_ref.address()) {} : address_(table_ref.address()) {}
ExternalReference ExternalReference::
incremental_marking_record_write_function(Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
}
ExternalReference ExternalReference::
incremental_evacuation_record_write_function(Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
}
ExternalReference ExternalReference::
store_buffer_overflow_function(Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
}
ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
}
ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) { ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
return return ExternalReference(Redirect(isolate,
ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC))); FUNCTION_ADDR(Runtime::PerformGC)));
} }
@ -834,6 +802,17 @@ ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
} }
ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) {
return ExternalReference(isolate->factory()->the_hole_value().location());
}
ExternalReference ExternalReference::arguments_marker_location(
Isolate* isolate) {
return ExternalReference(isolate->factory()->arguments_marker().location());
}
ExternalReference ExternalReference::roots_address(Isolate* isolate) { ExternalReference ExternalReference::roots_address(Isolate* isolate) {
return ExternalReference(isolate->heap()->roots_address()); return ExternalReference(isolate->heap()->roots_address());
} }
@ -861,14 +840,9 @@ ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
} }
ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
return ExternalReference(isolate->heap()->store_buffer()->TopAddress());
}
ExternalReference ExternalReference::new_space_mask(Isolate* isolate) { ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
return ExternalReference(reinterpret_cast<Address>( Address mask = reinterpret_cast<Address>(isolate->heap()->NewSpaceMask());
isolate->heap()->NewSpaceMask())); return ExternalReference(mask);
} }

27  deps/v8/src/assembler.h

@ -143,9 +143,6 @@ class Label BASE_EMBEDDED {
}; };
enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// Relocation information // Relocation information
@ -219,9 +216,8 @@ class RelocInfo BASE_EMBEDDED {
RelocInfo() {} RelocInfo() {}
RelocInfo(byte* pc, Mode rmode, intptr_t data)
RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host) : pc_(pc), rmode_(rmode), data_(data) {
: pc_(pc), rmode_(rmode), data_(data), host_(host) {
} }
static inline bool IsConstructCall(Mode mode) { static inline bool IsConstructCall(Mode mode) {
@ -262,7 +258,6 @@ class RelocInfo BASE_EMBEDDED {
void set_pc(byte* pc) { pc_ = pc; } void set_pc(byte* pc) { pc_ = pc; }
Mode rmode() const { return rmode_; } Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; } intptr_t data() const { return data_; }
Code* host() const { return host_; }
// Apply a relocation by delta bytes // Apply a relocation by delta bytes
INLINE(void apply(intptr_t delta)); INLINE(void apply(intptr_t delta));
@ -358,7 +353,6 @@ class RelocInfo BASE_EMBEDDED {
byte* pc_; byte* pc_;
Mode rmode_; Mode rmode_;
intptr_t data_; intptr_t data_;
Code* host_;
#ifdef V8_TARGET_ARCH_MIPS #ifdef V8_TARGET_ARCH_MIPS
// Code and Embedded Object pointers in mips are stored split // Code and Embedded Object pointers in mips are stored split
// across two consecutive 32-bit instructions. Heap management // across two consecutive 32-bit instructions. Heap management
@ -567,13 +561,6 @@ class ExternalReference BASE_EMBEDDED {
// pattern. This means that they have to be added to the // pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually. // ExternalReferenceTable in serialize.cc manually.
static ExternalReference incremental_marking_record_write_function(
Isolate* isolate);
static ExternalReference incremental_evacuation_record_write_function(
Isolate* isolate);
static ExternalReference store_buffer_overflow_function(
Isolate* isolate);
static ExternalReference flush_icache_function(Isolate* isolate);
static ExternalReference perform_gc_function(Isolate* isolate); static ExternalReference perform_gc_function(Isolate* isolate);
static ExternalReference fill_heap_number_with_random_function( static ExternalReference fill_heap_number_with_random_function(
Isolate* isolate); Isolate* isolate);
@ -590,6 +577,12 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate); static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate); static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
// Static variable Factory::the_hole_value.location()
static ExternalReference the_hole_value_location(Isolate* isolate);
// Static variable Factory::arguments_marker.location()
static ExternalReference arguments_marker_location(Isolate* isolate);
// Static variable Heap::roots_address() // Static variable Heap::roots_address()
static ExternalReference roots_address(Isolate* isolate); static ExternalReference roots_address(Isolate* isolate);
@ -613,10 +606,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_space_start(Isolate* isolate); static ExternalReference new_space_start(Isolate* isolate);
static ExternalReference new_space_mask(Isolate* isolate); static ExternalReference new_space_mask(Isolate* isolate);
static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate); static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
static ExternalReference new_space_mark_bits(Isolate* isolate);
// Write barrier.
static ExternalReference store_buffer_top(Isolate* isolate);
// Used for fast allocation in generated code. // Used for fast allocation in generated code.
static ExternalReference new_space_allocation_top_address(Isolate* isolate); static ExternalReference new_space_allocation_top_address(Isolate* isolate);

154  deps/v8/src/ast.cc

@ -327,77 +327,56 @@ bool BinaryOperation::ResultOverwriteAllowed() {
} }
static bool IsTypeof(Expression* expr) {
UnaryOperation* maybe_unary = expr->AsUnaryOperation();
return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
}
// Check for the pattern: typeof <expression> equals <string literal>.
static bool MatchLiteralCompareTypeof(Expression* left,
Token::Value op,
Expression* right,
Expression** expr,
Handle<String>* check) {
if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
*expr = left->AsUnaryOperation()->expression();
*check = Handle<String>::cast(right->AsLiteral()->handle());
return true;
}
return false;
}
bool CompareOperation::IsLiteralCompareTypeof(Expression** expr, bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
Handle<String>* check) { Handle<String>* check) {
return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) || if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false;
MatchLiteralCompareTypeof(right_, op_, left_, expr, check);
} UnaryOperation* left_unary = left_->AsUnaryOperation();
UnaryOperation* right_unary = right_->AsUnaryOperation();
Literal* left_literal = left_->AsLiteral();
static bool IsVoidOfLiteral(Expression* expr) { Literal* right_literal = right_->AsLiteral();
UnaryOperation* maybe_unary = expr->AsUnaryOperation();
return maybe_unary != NULL && // Check for the pattern: typeof <expression> == <string literal>.
maybe_unary->op() == Token::VOID && if (left_unary != NULL && left_unary->op() == Token::TYPEOF &&
maybe_unary->expression()->AsLiteral() != NULL; right_literal != NULL && right_literal->handle()->IsString()) {
} *expr = left_unary->expression();
*check = Handle<String>::cast(right_literal->handle());
return true;
}
// Check for the pattern: void <literal> equals <expression> // Check for the pattern: <string literal> == typeof <expression>.
static bool MatchLiteralCompareUndefined(Expression* left, if (right_unary != NULL && right_unary->op() == Token::TYPEOF &&
Token::Value op, left_literal != NULL && left_literal->handle()->IsString()) {
Expression* right, *expr = right_unary->expression();
Expression** expr) { *check = Handle<String>::cast(left_literal->handle());
if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
*expr = right;
return true; return true;
} }
return false; return false;
} }
bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) { bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
return MatchLiteralCompareUndefined(left_, op_, right_, expr) || if (op_ != Token::EQ_STRICT) return false;
MatchLiteralCompareUndefined(right_, op_, left_, expr);
}
UnaryOperation* left_unary = left_->AsUnaryOperation();
UnaryOperation* right_unary = right_->AsUnaryOperation();
// Check for the pattern: null equals <expression> // Check for the pattern: <expression> === void <literal>.
static bool MatchLiteralCompareNull(Expression* left, if (right_unary != NULL && right_unary->op() == Token::VOID &&
Token::Value op, right_unary->expression()->AsLiteral() != NULL) {
Expression* right, *expr = left_;
Expression** expr) {
if (left->IsNullLiteral() && Token::IsEqualityOp(op)) {
*expr = right;
return true; return true;
} }
return false;
}
// Check for the pattern: void <literal> === <expression>.
if (left_unary != NULL && left_unary->op() == Token::VOID &&
left_unary->expression()->AsLiteral() != NULL) {
*expr = right_;
return true;
}
bool CompareOperation::IsLiteralCompareNull(Expression** expr) { return false;
return MatchLiteralCompareNull(left_, op_, right_, expr) ||
MatchLiteralCompareNull(right_, op_, left_, expr);
} }
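One side factors these matchers into order-insensitive helpers; the other spells out both operand orders inline and models null comparison as a dedicated CompareToNull node. The source patterns being recognized, written out as plain JS:

  var x = {};
  if (typeof x == "function") { }    // IsLiteralCompareTypeof, literal on the right
  if ("function" === typeof x) { }   // same pattern with the operands swapped
  if (x === void 0) { }              // IsLiteralCompareUndefined
  if (x == null) { }                 // handled by MatchLiteralCompareNull on one side,
                                     // as a CompareToNull AST node on the other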
@ -550,9 +529,7 @@ bool Conditional::IsInlineable() const {
bool VariableProxy::IsInlineable() const { bool VariableProxy::IsInlineable() const {
return var()->IsUnallocated() return var()->IsUnallocated() || var()->IsStackAllocated();
|| var()->IsStackAllocated()
|| var()->IsContextSlot();
} }
@ -621,6 +598,11 @@ bool CompareOperation::IsInlineable() const {
} }
bool CompareToNull::IsInlineable() const {
return expression()->IsInlineable();
}
bool CountOperation::IsInlineable() const { bool CountOperation::IsInlineable() const {
return expression()->IsInlineable(); return expression()->IsInlineable();
} }
@ -764,41 +746,37 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle, void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
CallKind call_kind) { CallKind call_kind) {
is_monomorphic_ = oracle->CallIsMonomorphic(this);
Property* property = expression()->AsProperty(); Property* property = expression()->AsProperty();
if (property == NULL) { ASSERT(property != NULL);
// Function call. Specialize for monomorphic calls. // Specialize for the receiver types seen at runtime.
if (is_monomorphic_) target_ = oracle->GetCallTarget(this); Literal* key = property->key()->AsLiteral();
} else { ASSERT(key != NULL && key->handle()->IsString());
// Method call. Specialize for the receiver types seen at runtime. Handle<String> name = Handle<String>::cast(key->handle());
Literal* key = property->key()->AsLiteral(); receiver_types_.Clear();
ASSERT(key != NULL && key->handle()->IsString()); oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
Handle<String> name = Handle<String>::cast(key->handle());
receiver_types_.Clear();
oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
#ifdef DEBUG #ifdef DEBUG
if (FLAG_enable_slow_asserts) { if (FLAG_enable_slow_asserts) {
int length = receiver_types_.length(); int length = receiver_types_.length();
for (int i = 0; i < length; i++) { for (int i = 0; i < length; i++) {
Handle<Map> map = receiver_types_.at(i); Handle<Map> map = receiver_types_.at(i);
ASSERT(!map.is_null() && *map != NULL); ASSERT(!map.is_null() && *map != NULL);
}
} }
}
#endif #endif
check_type_ = oracle->GetCallCheckType(this); is_monomorphic_ = oracle->CallIsMonomorphic(this);
if (is_monomorphic_) { check_type_ = oracle->GetCallCheckType(this);
Handle<Map> map; if (is_monomorphic_) {
if (receiver_types_.length() > 0) { Handle<Map> map;
ASSERT(check_type_ == RECEIVER_MAP_CHECK); if (receiver_types_.length() > 0) {
map = receiver_types_.at(0); ASSERT(check_type_ == RECEIVER_MAP_CHECK);
} else { map = receiver_types_.at(0);
ASSERT(check_type_ != RECEIVER_MAP_CHECK); } else {
holder_ = Handle<JSObject>( ASSERT(check_type_ != RECEIVER_MAP_CHECK);
oracle->GetPrototypeForPrimitiveCheck(check_type_)); holder_ = Handle<JSObject>(
map = Handle<Map>(holder_->map()); oracle->GetPrototypeForPrimitiveCheck(check_type_));
} map = Handle<Map>(holder_->map());
is_monomorphic_ = ComputeTarget(map, name);
} }
is_monomorphic_ = ComputeTarget(map, name);
} }
} }
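RecordTypeFeedback specializes a call site on the receiver maps observed at runtime; a site that has only ever seen one map stays monomorphic. A rough, purely illustrative JS sketch of that distinction (maps are engine-internal; the functions here are hypothetical):

  function Point(x) { this.x = x; }
  Point.prototype.norm = function () { return Math.abs(this.x); };

  function Vec(x, y) { this.x = x; this.y = y; }
  Vec.prototype.norm = function () { return Math.abs(this.x) + Math.abs(this.y); };

  function callNorm(obj) { return obj.norm(); }   // the call site being profiled

  callNorm(new Point(1));    // only Point receivers seen: monomorphic
  callNorm(new Vec(2, 3));   // a second receiver map: no longer monomorphic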

29  deps/v8/src/ast.h

@ -90,6 +90,7 @@ namespace internal {
V(CountOperation) \ V(CountOperation) \
V(BinaryOperation) \ V(BinaryOperation) \
V(CompareOperation) \ V(CompareOperation) \
V(CompareToNull) \
V(ThisFunction) V(ThisFunction)
#define AST_NODE_LIST(V) \ #define AST_NODE_LIST(V) \
@ -288,12 +289,6 @@ class Expression: public AstNode {
// True iff the expression is a literal represented as a smi. // True iff the expression is a literal represented as a smi.
virtual bool IsSmiLiteral() { return false; } virtual bool IsSmiLiteral() { return false; }
// True iff the expression is a string literal.
virtual bool IsStringLiteral() { return false; }
// True iff the expression is the null literal.
virtual bool IsNullLiteral() { return false; }
// Type feedback information for assignments and properties. // Type feedback information for assignments and properties.
virtual bool IsMonomorphic() { virtual bool IsMonomorphic() {
UNREACHABLE(); UNREACHABLE();
@ -896,8 +891,6 @@ class Literal: public Expression {
virtual bool IsTrivial() { return true; } virtual bool IsTrivial() { return true; }
virtual bool IsSmiLiteral() { return handle_->IsSmi(); } virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
virtual bool IsStringLiteral() { return handle_->IsString(); }
virtual bool IsNullLiteral() { return handle_->IsNull(); }
// Check if this literal is identical to the other literal. // Check if this literal is identical to the other literal.
bool IsIdenticalTo(const Literal* other) const { bool IsIdenticalTo(const Literal* other) const {
@ -1472,7 +1465,6 @@ class CompareOperation: public Expression {
// Match special cases. // Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check); bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
bool IsLiteralCompareUndefined(Expression** expr); bool IsLiteralCompareUndefined(Expression** expr);
bool IsLiteralCompareNull(Expression** expr);
private: private:
Token::Value op_; Token::Value op_;
@ -1485,6 +1477,25 @@ class CompareOperation: public Expression {
}; };
class CompareToNull: public Expression {
public:
CompareToNull(Isolate* isolate, bool is_strict, Expression* expression)
: Expression(isolate), is_strict_(is_strict), expression_(expression) { }
DECLARE_NODE_TYPE(CompareToNull)
virtual bool IsInlineable() const;
bool is_strict() const { return is_strict_; }
Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
Expression* expression() const { return expression_; }
private:
bool is_strict_;
Expression* expression_;
};
class Conditional: public Expression { class Conditional: public Expression {
public: public:
Conditional(Isolate* isolate, Conditional(Isolate* isolate,

57  deps/v8/src/bootstrapper.cc

@ -34,7 +34,6 @@
#include "debug.h" #include "debug.h"
#include "execution.h" #include "execution.h"
#include "global-handles.h" #include "global-handles.h"
#include "isolate-inl.h"
#include "macro-assembler.h" #include "macro-assembler.h"
#include "natives.h" #include "natives.h"
#include "objects-visiting.h" #include "objects-visiting.h"
@ -996,26 +995,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
initial_map->instance_size() + 5 * kPointerSize); initial_map->instance_size() + 5 * kPointerSize);
initial_map->set_instance_descriptors(*descriptors); initial_map->set_instance_descriptors(*descriptors);
initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map)); initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
// RegExp prototype object is itself a RegExp.
Handle<Map> proto_map = factory->CopyMapDropTransitions(initial_map);
proto_map->set_prototype(global_context()->initial_object_prototype());
Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
heap->empty_string());
proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
heap->false_value());
proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
heap->false_value());
proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex,
heap->false_value());
proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
Smi::FromInt(0),
SKIP_WRITE_BARRIER); // It's a Smi.
initial_map->set_prototype(*proto);
factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
JSRegExp::IRREGEXP, factory->empty_string(),
JSRegExp::Flags(0), 0);
} }
{ // -- J S O N { // -- J S O N
@ -1097,11 +1076,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
elements->set(0, *array); elements->set(0, *array);
array = factory->NewFixedArray(0); array = factory->NewFixedArray(0);
elements->set(1, *array); elements->set(1, *array);
Handle<Map> non_strict_arguments_elements_map =
factory->GetElementsTransitionMap(result,
NON_STRICT_ARGUMENTS_ELEMENTS);
result->set_map(*non_strict_arguments_elements_map);
ASSERT(result->HasNonStrictArgumentsElements());
result->set_elements(*elements); result->set_elements(*elements);
global_context()->set_aliased_arguments_boilerplate(*result); global_context()->set_aliased_arguments_boilerplate(*result);
} }
@ -1353,8 +1327,6 @@ void Genesis::InstallNativeFunctions() {
configure_instance_fun); configure_instance_fun);
INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun); INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
INSTALL_NATIVE(JSObject, "functionCache", function_cache); INSTALL_NATIVE(JSObject, "functionCache", function_cache);
INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
to_complete_property_descriptor);
} }
void Genesis::InstallExperimentalNativeFunctions() { void Genesis::InstallExperimentalNativeFunctions() {
@ -1583,18 +1555,6 @@ bool Genesis::InstallNatives() {
isolate()->builtins()->builtin(Builtins::kArrayConstructCode)); isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
array_function->shared()->DontAdaptArguments(); array_function->shared()->DontAdaptArguments();
// InternalArrays should not use Smi-Only array optimizations. There are too
// many places in the C++ runtime code (e.g. RegEx) that assume that
// elements in InternalArrays can be set to non-Smi values without going
// through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
// transition easy to trap. Moreover, they rarely are smi-only.
MaybeObject* maybe_map =
array_function->initial_map()->CopyDropTransitions();
Map* new_map;
if (!maybe_map->To<Map>(&new_map)) return maybe_map;
new_map->set_elements_kind(FAST_ELEMENTS);
array_function->set_initial_map(new_map);
// Make "length" magic on instances. // Make "length" magic on instances.
Handle<DescriptorArray> array_descriptors = Handle<DescriptorArray> array_descriptors =
factory()->CopyAppendForeignDescriptor( factory()->CopyAppendForeignDescriptor(
@ -1978,15 +1938,14 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
if (!InstallExtension(extension->dependencies()[i])) return false; if (!InstallExtension(extension->dependencies()[i])) return false;
} }
Isolate* isolate = Isolate::Current(); Isolate* isolate = Isolate::Current();
Handle<String> source_code = Vector<const char> source = CStrVector(extension->source());
isolate->factory()->NewExternalStringFromAscii(extension->source()); Handle<String> source_code = isolate->factory()->NewStringFromAscii(source);
bool result = CompileScriptCached( bool result = CompileScriptCached(CStrVector(extension->name()),
CStrVector(extension->name()), source_code,
source_code, isolate->bootstrapper()->extensions_cache(),
isolate->bootstrapper()->extensions_cache(), extension,
extension, Handle<Context>(isolate->context()),
Handle<Context>(isolate->context()), false);
false);
ASSERT(isolate->has_pending_exception() != result); ASSERT(isolate->has_pending_exception() != result);
if (!result) { if (!result) {
isolate->clear_pending_exception(); isolate->clear_pending_exception();

146  deps/v8/src/builtins.cc

@ -33,7 +33,6 @@
#include "builtins.h" #include "builtins.h"
#include "gdb-jit.h" #include "gdb-jit.h"
#include "ic-inl.h" #include "ic-inl.h"
#include "mark-compact.h"
#include "vm-state-inl.h" #include "vm-state-inl.h"
namespace v8 { namespace v8 {
@ -203,7 +202,7 @@ BUILTIN(ArrayCodeGeneric) {
} }
// 'array' now contains the JSArray we should initialize. // 'array' now contains the JSArray we should initialize.
ASSERT(array->HasFastTypeElements()); ASSERT(array->HasFastElements());
// Optimize the case where there is one argument and the argument is a // Optimize the case where there is one argument and the argument is a
// small smi. // small smi.
@ -216,8 +215,7 @@ BUILTIN(ArrayCodeGeneric) {
{ MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len); { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
if (!maybe_obj->ToObject(&obj)) return maybe_obj; if (!maybe_obj->ToObject(&obj)) return maybe_obj;
} }
MaybeObject* maybe_obj = array->SetContent(FixedArray::cast(obj)); array->SetContent(FixedArray::cast(obj));
if (maybe_obj->IsFailure()) return maybe_obj;
return array; return array;
} }
} }
@ -241,11 +239,6 @@ BUILTIN(ArrayCodeGeneric) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj; if (!maybe_obj->ToObject(&obj)) return maybe_obj;
} }
// Set length and elements on the array.
MaybeObject* maybe_object =
array->EnsureCanContainElements(FixedArray::cast(obj));
if (maybe_object->IsFailure()) return maybe_object;
AssertNoAllocation no_gc; AssertNoAllocation no_gc;
FixedArray* elms = FixedArray::cast(obj); FixedArray* elms = FixedArray::cast(obj);
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@ -254,6 +247,7 @@ BUILTIN(ArrayCodeGeneric) {
elms->set(index, args[index+1], mode); elms->set(index, args[index+1], mode);
} }
// Set length and elements on the array.
array->set_elements(FixedArray::cast(obj)); array->set_elements(FixedArray::cast(obj));
array->set_length(len); array->set_length(len);
@ -301,7 +295,6 @@ static void CopyElements(Heap* heap,
if (mode == UPDATE_WRITE_BARRIER) { if (mode == UPDATE_WRITE_BARRIER) {
heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len); heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
} }
heap->incremental_marking()->RecordWrites(dst);
} }
@ -320,7 +313,6 @@ static void MoveElements(Heap* heap,
if (mode == UPDATE_WRITE_BARRIER) { if (mode == UPDATE_WRITE_BARRIER) {
heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len); heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
} }
heap->incremental_marking()->RecordWrites(dst);
} }
@ -366,14 +358,6 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
former_start[to_trim] = heap->fixed_array_map(); former_start[to_trim] = heap->fixed_array_map();
former_start[to_trim + 1] = Smi::FromInt(len - to_trim); former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
// Maintain marking consistency for HeapObjectIterator and
// IncrementalMarking.
int size_delta = to_trim * kPointerSize;
if (heap->marking()->TransferMark(elms->address(),
elms->address() + size_delta)) {
MemoryChunk::IncrementLiveBytes(elms->address(), -size_delta);
}
return FixedArray::cast(HeapObject::FromAddress( return FixedArray::cast(HeapObject::FromAddress(
elms->address() + to_trim * kPointerSize)); elms->address() + to_trim * kPointerSize));
} }
@ -400,36 +384,15 @@ static bool ArrayPrototypeHasNoElements(Heap* heap,
MUST_USE_RESULT MUST_USE_RESULT
static inline MaybeObject* EnsureJSArrayWithWritableFastElements( static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
Heap* heap, Object* receiver, Arguments* args, int first_added_arg) { Heap* heap, Object* receiver) {
if (!receiver->IsJSArray()) return NULL; if (!receiver->IsJSArray()) return NULL;
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
HeapObject* elms = array->elements(); HeapObject* elms = array->elements();
Map* map = elms->map(); if (elms->map() == heap->fixed_array_map()) return elms;
if (map == heap->fixed_array_map()) { if (elms->map() == heap->fixed_cow_array_map()) {
if (args == NULL || !array->HasFastSmiOnlyElements()) { return array->EnsureWritableFastElements();
return elms;
}
} else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
if (args == NULL || !array->HasFastSmiOnlyElements() ||
maybe_writable_result->IsFailure()) {
return maybe_writable_result;
}
} else {
return NULL;
} }
return NULL;
// Need to ensure that the arguments passed in args can be contained in
// the array.
int args_length = args->length();
if (first_added_arg >= args_length) return array->elements();
MaybeObject* maybe_array = array->EnsureCanContainElements(
args,
first_added_arg,
args_length - first_added_arg);
if (maybe_array->IsFailure()) return maybe_array;
return array->elements();
} }
@ -450,18 +413,20 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
HandleScope handleScope(isolate); HandleScope handleScope(isolate);
Handle<Object> js_builtin = Handle<Object> js_builtin =
GetProperty(Handle<JSObject>(isolate->global_context()->builtins()), GetProperty(Handle<JSObject>(
name); isolate->global_context()->builtins()),
Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin); name);
int argc = args.length() - 1; ASSERT(js_builtin->IsJSFunction());
ScopedVector<Handle<Object> > argv(argc); Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
for (int i = 0; i < argc; ++i) { ScopedVector<Object**> argv(args.length() - 1);
argv[i] = args.at<Object>(i + 1); int n_args = args.length() - 1;
} for (int i = 0; i < n_args; i++) {
bool pending_exception; argv[i] = args.at<Object>(i + 1).location();
}
bool pending_exception = false;
Handle<Object> result = Execution::Call(function, Handle<Object> result = Execution::Call(function,
args.receiver(), args.receiver(),
argc, n_args,
argv.start(), argv.start(),
&pending_exception); &pending_exception);
if (pending_exception) return Failure::Exception(); if (pending_exception) return Failure::Exception();
@ -474,7 +439,7 @@ BUILTIN(ArrayPush) {
Object* receiver = *args.receiver(); Object* receiver = *args.receiver();
Object* elms_obj; Object* elms_obj;
{ MaybeObject* maybe_elms_obj = { MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1); EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL) { if (maybe_elms_obj == NULL) {
return CallJsBuiltin(isolate, "ArrayPush", args); return CallJsBuiltin(isolate, "ArrayPush", args);
} }
@ -510,6 +475,7 @@ BUILTIN(ArrayPush) {
FillWithHoles(heap, new_elms, new_length, capacity); FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms; elms = new_elms;
array->set_elements(elms);
} }
// Add the provided values. // Add the provided values.
@ -519,10 +485,6 @@ BUILTIN(ArrayPush) {
elms->set(index + len, args[index + 1], mode); elms->set(index + len, args[index + 1], mode);
} }
if (elms != array->elements()) {
array->set_elements(elms);
}
// Set the length. // Set the length.
array->set_length(Smi::FromInt(new_length)); array->set_length(Smi::FromInt(new_length));
return Smi::FromInt(new_length); return Smi::FromInt(new_length);
@ -534,7 +496,7 @@ BUILTIN(ArrayPop) {
Object* receiver = *args.receiver(); Object* receiver = *args.receiver();
Object* elms_obj; Object* elms_obj;
{ MaybeObject* maybe_elms_obj = { MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args); if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
} }
@ -567,7 +529,7 @@ BUILTIN(ArrayShift) {
Object* receiver = *args.receiver(); Object* receiver = *args.receiver();
Object* elms_obj; Object* elms_obj;
{ MaybeObject* maybe_elms_obj = { MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL) if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayShift", args); return CallJsBuiltin(isolate, "ArrayShift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@ -577,7 +539,7 @@ BUILTIN(ArrayShift) {
} }
FixedArray* elms = FixedArray::cast(elms_obj); FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastTypeElements()); ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value(); int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value(); if (len == 0) return heap->undefined_value();
@ -589,7 +551,9 @@ BUILTIN(ArrayShift) {
} }
if (!heap->lo_space()->Contains(elms)) { if (!heap->lo_space()->Contains(elms)) {
array->set_elements(LeftTrimFixedArray(heap, elms, 1)); // As elms still in the same space they used to be,
// there is no need to update region dirty mark.
array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER);
} else { } else {
// Shift the elements. // Shift the elements.
AssertNoAllocation no_gc; AssertNoAllocation no_gc;
@ -609,7 +573,7 @@ BUILTIN(ArrayUnshift) {
Object* receiver = *args.receiver(); Object* receiver = *args.receiver();
Object* elms_obj; Object* elms_obj;
{ MaybeObject* maybe_elms_obj = { MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL) if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayUnshift", args); return CallJsBuiltin(isolate, "ArrayUnshift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@ -619,7 +583,7 @@ BUILTIN(ArrayUnshift) {
} }
FixedArray* elms = FixedArray::cast(elms_obj); FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastTypeElements()); ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value(); int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1; int to_add = args.length() - 1;
@ -628,10 +592,6 @@ BUILTIN(ArrayUnshift) {
// we should never hit this case. // we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len)); ASSERT(to_add <= (Smi::kMaxValue - len));
MaybeObject* maybe_object =
array->EnsureCanContainElements(&args, 1, to_add);
if (maybe_object->IsFailure()) return maybe_object;
if (new_length > elms->length()) { if (new_length > elms->length()) {
// New backing storage is needed. // New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16; int capacity = new_length + (new_length >> 1) + 16;
@ -640,11 +600,13 @@ BUILTIN(ArrayUnshift) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj; if (!maybe_obj->ToObject(&obj)) return maybe_obj;
} }
FixedArray* new_elms = FixedArray::cast(obj); FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc; AssertNoAllocation no_gc;
if (len > 0) { if (len > 0) {
CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len); CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
} }
FillWithHoles(heap, new_elms, new_length, capacity); FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms; elms = new_elms;
array->set_elements(elms); array->set_elements(elms);
} else { } else {
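When push/unshift outgrow the backing store, both sides use the same growth rule, capacity = new_length + (new_length >> 1) + 16. The arithmetic as a standalone JS helper (illustrative only):

  // Roughly 1.5x growth plus a small fixed slack, matching the expression above.
  function newCapacity(newLength) {
    return newLength + (newLength >> 1) + 16;
  }
  newCapacity(0);      // 16
  newCapacity(100);    // 166
  newCapacity(1000);   // 1516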
@ -672,7 +634,7 @@ BUILTIN(ArraySlice) {
int len = -1; int len = -1;
if (receiver->IsJSArray()) { if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
if (!array->HasFastTypeElements() || if (!array->HasFastElements() ||
!IsJSArrayFastElementMovingAllowed(heap, array)) { !IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args); return CallJsBuiltin(isolate, "ArraySlice", args);
} }
@ -688,7 +650,7 @@ BUILTIN(ArraySlice) {
bool is_arguments_object_with_fast_elements = bool is_arguments_object_with_fast_elements =
receiver->IsJSObject() receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map && JSObject::cast(receiver)->map() == arguments_map
&& JSObject::cast(receiver)->HasFastTypeElements(); && JSObject::cast(receiver)->HasFastElements();
if (!is_arguments_object_with_fast_elements) { if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args); return CallJsBuiltin(isolate, "ArraySlice", args);
} }
@ -759,10 +721,6 @@ BUILTIN(ArraySlice) {
} }
FixedArray* result_elms = FixedArray::cast(result); FixedArray* result_elms = FixedArray::cast(result);
MaybeObject* maybe_object =
result_array->EnsureCanContainElements(result_elms);
if (maybe_object->IsFailure()) return maybe_object;
AssertNoAllocation no_gc; AssertNoAllocation no_gc;
CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len); CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len);
@ -780,7 +738,7 @@ BUILTIN(ArraySplice) {
Object* receiver = *args.receiver(); Object* receiver = *args.receiver();
Object* elms_obj; Object* elms_obj;
{ MaybeObject* maybe_elms_obj = { MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3); EnsureJSArrayWithWritableFastElements(heap, receiver);
if (maybe_elms_obj == NULL) if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArraySplice", args); return CallJsBuiltin(isolate, "ArraySplice", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@ -790,7 +748,7 @@ BUILTIN(ArraySplice) {
} }
FixedArray* elms = FixedArray::cast(elms_obj); FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastTypeElements()); ASSERT(array->HasFastElements());
int len = Smi::cast(array->length())->value(); int len = Smi::cast(array->length())->value();
@ -867,9 +825,9 @@ BUILTIN(ArraySplice) {
} }
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0; int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
int new_length = len - actual_delete_count + item_count; int new_length = len - actual_delete_count + item_count;
bool elms_changed = false;
if (item_count < actual_delete_count) { if (item_count < actual_delete_count) {
// Shrink the array. // Shrink the array.
const bool trim_array = !heap->lo_space()->Contains(elms) && const bool trim_array = !heap->lo_space()->Contains(elms) &&
@ -884,8 +842,7 @@ BUILTIN(ArraySplice) {
} }
elms = LeftTrimFixedArray(heap, elms, delta); elms = LeftTrimFixedArray(heap, elms, delta);
array->set_elements(elms, SKIP_WRITE_BARRIER);
elms_changed = true;
} else { } else {
AssertNoAllocation no_gc; AssertNoAllocation no_gc;
MoveElements(heap, &no_gc, MoveElements(heap, &no_gc,
@ -925,7 +882,7 @@ BUILTIN(ArraySplice) {
FillWithHoles(heap, new_elms, new_length, capacity); FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms; elms = new_elms;
elms_changed = true; array->set_elements(elms);
} else { } else {
AssertNoAllocation no_gc; AssertNoAllocation no_gc;
MoveElements(heap, &no_gc, MoveElements(heap, &no_gc,
@ -941,10 +898,6 @@ BUILTIN(ArraySplice) {
elms->set(k, args[3 + k - actual_start], mode); elms->set(k, args[3 + k - actual_start], mode);
} }
if (elms_changed) {
array->set_elements(elms);
}
// Set the length. // Set the length.
array->set_length(Smi::FromInt(new_length)); array->set_length(Smi::FromInt(new_length));
@ -967,7 +920,7 @@ BUILTIN(ArrayConcat) {
int result_len = 0; int result_len = 0;
for (int i = 0; i < n_arguments; i++) { for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i]; Object* arg = args[i];
if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements() if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
|| JSArray::cast(arg)->GetPrototype() != array_proto) { || JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args); return CallJsBuiltin(isolate, "ArrayConcat", args);
} }
@ -1003,17 +956,6 @@ BUILTIN(ArrayConcat) {
} }
FixedArray* result_elms = FixedArray::cast(result); FixedArray* result_elms = FixedArray::cast(result);
// Ensure element type transitions happen before copying elements in.
if (result_array->HasFastSmiOnlyElements()) {
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
if (!array->HasFastSmiOnlyElements()) {
result_array->EnsureCanContainNonSmiElements();
break;
}
}
}
// Copy data. // Copy data.
AssertNoAllocation no_gc; AssertNoAllocation no_gc;
int start_pos = 0; int start_pos = 0;
@ -1665,22 +1607,20 @@ void Builtins::Setup(bool create_heap_objects) {
const BuiltinDesc* functions = BuiltinFunctionTable::functions(); const BuiltinDesc* functions = BuiltinFunctionTable::functions();
// For now we generate builtin adaptor code into a stack-allocated // For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects. Be careful // buffer, before copying it into individual code objects.
// with alignment, some platforms don't like unaligned code. byte buffer[4*KB];
union { int force_alignment; byte buffer[4*KB]; } u;
// Traverse the list of builtins and generate an adaptor in a // Traverse the list of builtins and generate an adaptor in a
// separate code object for each one. // separate code object for each one.
for (int i = 0; i < builtin_count; i++) { for (int i = 0; i < builtin_count; i++) {
if (create_heap_objects) { if (create_heap_objects) {
MacroAssembler masm(isolate, u.buffer, sizeof u.buffer); MacroAssembler masm(isolate, buffer, sizeof buffer);
// Generate the code/adaptor. // Generate the code/adaptor.
typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments); typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
Generator g = FUNCTION_CAST<Generator>(functions[i].generator); Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
// We pass all arguments to the generator, but it may not use all of // We pass all arguments to the generator, but it may not use all of
// them. This works because the first arguments are on top of the // them. This works because the first arguments are on top of the
// stack. // stack.
ASSERT(!masm.has_frame());
g(&masm, functions[i].name, functions[i].extra_args); g(&masm, functions[i].name, functions[i].extra_args);
// Move the code into the object heap. // Move the code into the object heap.
CodeDesc desc; CodeDesc desc;

12
deps/v8/src/cached-powers.cc

@@ -134,12 +134,14 @@ static const CachedPower kCachedPowers[] = {
}; };
static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers); static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent. static const int kCachedPowersOffset = -kCachedPowers[0].decimal_exponent;
static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10) static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
// Difference between the decimal exponents in the table above. const int PowersOfTenCache::kDecimalExponentDistance =
const int PowersOfTenCache::kDecimalExponentDistance = 8; kCachedPowers[1].decimal_exponent - kCachedPowers[0].decimal_exponent;
const int PowersOfTenCache::kMinDecimalExponent = -348; const int PowersOfTenCache::kMinDecimalExponent =
const int PowersOfTenCache::kMaxDecimalExponent = 340; kCachedPowers[0].decimal_exponent;
const int PowersOfTenCache::kMaxDecimalExponent =
kCachedPowers[kCachedPowersLength - 1].decimal_exponent;
void PowersOfTenCache::GetCachedPowerForBinaryExponentRange( void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
int min_exponent, int min_exponent,

45
deps/v8/src/code-stubs.cc

@@ -52,12 +52,11 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated. // Update the static counter each time a new code stub is generated.
masm->isolate()->counters()->code_stubs()->Increment(); masm->isolate()->counters()->code_stubs()->Increment();
// Nested stubs are not allowed for leaves. // Nested stubs are not allowed for leafs.
AllowStubCallsScope allow_scope(masm, false); AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
// Generate the code for the stub. // Generate the code for the stub.
masm->set_generating_stub(true); masm->set_generating_stub(true);
NoCurrentFrameScope scope(masm);
Generate(masm); Generate(masm);
} }
@@ -128,10 +127,8 @@ Handle<Code> CodeStub::GetCode() {
GetKey(), GetKey(),
new_object); new_object);
heap->public_set_code_stubs(*dict); heap->public_set_code_stubs(*dict);
code = *new_object; code = *new_object;
Activate(code);
} else {
CHECK(IsPregenerated() == code->is_pregenerated());
} }
ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code)); ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
@@ -169,11 +166,7 @@ MaybeObject* CodeStub::TryGetCode() {
heap->code_stubs()->AtNumberPut(GetKey(), code); heap->code_stubs()->AtNumberPut(GetKey(), code);
if (maybe_new_object->ToObject(&new_object)) { if (maybe_new_object->ToObject(&new_object)) {
heap->public_set_code_stubs(NumberDictionary::cast(new_object)); heap->public_set_code_stubs(NumberDictionary::cast(new_object));
} else if (MustBeInStubCache()) {
return maybe_new_object;
} }
Activate(code);
} }
return code; return code;
@@ -195,11 +188,6 @@ const char* CodeStub::MajorName(CodeStub::Major major_key,
} }
void CodeStub::PrintName(StringStream* stream) {
stream->Add("%s", MajorName(MajorKey(), false));
}
int ICCompareStub::MinorKey() { int ICCompareStub::MinorKey() {
return OpField::encode(op_ - Token::EQ) | StateField::encode(state_); return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
} }
@@ -257,7 +245,6 @@ void InstanceofStub::PrintName(StringStream* stream) {
void KeyedLoadElementStub::Generate(MacroAssembler* masm) { void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) { switch (elements_kind_) {
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastElement(masm); KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
break; break;
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
@@ -287,11 +274,7 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
void KeyedStoreElementStub::Generate(MacroAssembler* masm) { void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) { switch (elements_kind_) {
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS: { KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_js_array_,
elements_kind_);
}
break; break;
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
@@ -319,20 +302,24 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::PrintName(StringStream* stream) { void ArgumentsAccessStub::PrintName(StringStream* stream) {
stream->Add("ArgumentsAccessStub_"); const char* type_name = NULL; // Make g++ happy.
switch (type_) { switch (type_) {
case READ_ELEMENT: stream->Add("ReadElement"); break; case READ_ELEMENT: type_name = "ReadElement"; break;
case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break; case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break;
case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break; case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break;
case NEW_STRICT: stream->Add("NewStrict"); break; case NEW_STRICT: type_name = "NewStrict"; break;
} }
stream->Add("ArgumentsAccessStub_%s", type_name);
} }
void CallFunctionStub::PrintName(StringStream* stream) { void CallFunctionStub::PrintName(StringStream* stream) {
stream->Add("CallFunctionStub_Args%d", argc_); const char* flags_name = NULL; // Make g++ happy.
if (ReceiverMightBeImplicit()) stream->Add("_Implicit"); switch (flags_) {
if (RecordCallTarget()) stream->Add("_Recording"); case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break;
case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break;
}
stream->Add("CallFunctionStub_Args%d%s", argc_, flags_name);
} }

117
deps/v8/src/code-stubs.h

@@ -45,23 +45,27 @@ namespace internal {
V(Compare) \ V(Compare) \
V(CompareIC) \ V(CompareIC) \
V(MathPow) \ V(MathPow) \
V(RecordWrite) \
V(StoreBufferOverflow) \
V(RegExpExec) \
V(TranscendentalCache) \ V(TranscendentalCache) \
V(Instanceof) \ V(Instanceof) \
/* All stubs above this line only exist in a few versions, which are */ \
/* generated ahead of time. Therefore compiling a call to one of */ \
/* them can't cause a new stub to be compiled, so compiling a call to */ \
/* them is GC safe. The ones below this line exist in many variants */ \
/* so code compiling a call to one can cause a GC. This means they */ \
/* can't be called from other stubs, since stub generation code is */ \
/* not GC safe. */ \
V(ConvertToDouble) \ V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \ V(WriteInt32ToHeapNumber) \
V(StackCheck) \ V(StackCheck) \
V(FastNewClosure) \ V(FastNewClosure) \
V(FastNewContext) \ V(FastNewContext) \
V(FastNewBlockContext) \
V(FastCloneShallowArray) \ V(FastCloneShallowArray) \
V(RevertToNumber) \ V(RevertToNumber) \
V(ToBoolean) \ V(ToBoolean) \
V(ToNumber) \ V(ToNumber) \
V(CounterOp) \ V(CounterOp) \
V(ArgumentsAccess) \ V(ArgumentsAccess) \
V(RegExpExec) \
V(RegExpConstructResult) \ V(RegExpConstructResult) \
V(NumberToString) \ V(NumberToString) \
V(CEntry) \ V(CEntry) \
@@ -69,7 +73,7 @@ namespace internal {
V(KeyedLoadElement) \ V(KeyedLoadElement) \
V(KeyedStoreElement) \ V(KeyedStoreElement) \
V(DebuggerStatement) \ V(DebuggerStatement) \
V(StringDictionaryLookup) V(StringDictionaryNegativeLookup)
// List of code stubs only used on ARM platforms. // List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM #ifdef V8_TARGET_ARCH_ARM
@@ -138,27 +142,6 @@ class CodeStub BASE_EMBEDDED {
virtual ~CodeStub() {} virtual ~CodeStub() {}
bool CompilingCallsToThisStubIsGCSafe() {
bool is_pregenerated = IsPregenerated();
Code* code = NULL;
CHECK(!is_pregenerated || FindCodeInCache(&code));
return is_pregenerated;
}
// See comment above, where Instanceof is defined.
virtual bool IsPregenerated() { return false; }
static void GenerateStubsAheadOfTime();
static void GenerateFPStubs();
// Some stubs put untagged junk on the stack that cannot be scanned by the
// GC. This means that we must be statically sure that no GC can occur while
// they are running. If that is the case they should override this to return
// true, which will cause an assertion if we try to call something that can
// GC or if we try to put a stack frame on top of the junk, which would not
// result in a traversable stack.
virtual bool SometimesSetsUpAFrame() { return true; }
protected: protected:
static const int kMajorBits = 6; static const int kMajorBits = 6;
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits; static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
@@ -181,14 +164,6 @@ class CodeStub BASE_EMBEDDED {
// Finish the code object after it has been generated. // Finish the code object after it has been generated.
virtual void FinishCode(Code* code) { } virtual void FinishCode(Code* code) { }
// Returns true if TryGetCode should fail if it failed
// to register newly generated stub in the stub cache.
virtual bool MustBeInStubCache() { return false; }
// Activate newly generated stub. Is called after
// registering stub in the stub cache.
virtual void Activate(Code* code) { }
// Returns information for computing the number key. // Returns information for computing the number key.
virtual Major MajorKey() = 0; virtual Major MajorKey() = 0;
virtual int MinorKey() = 0; virtual int MinorKey() = 0;
@@ -203,7 +178,9 @@ class CodeStub BASE_EMBEDDED {
// Returns a name for logging/debugging purposes. // Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName(); SmartArrayPointer<const char> GetName();
virtual void PrintName(StringStream* stream); virtual void PrintName(StringStream* stream) {
stream->Add("%s", MajorName(MajorKey(), false));
}
// Returns whether the code generated for this stub needs to be allocated as // Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object. // a fixed (non-moveable) code object.
@@ -216,6 +193,9 @@ class CodeStub BASE_EMBEDDED {
MajorKeyBits::encode(MajorKey()); MajorKeyBits::encode(MajorKey());
} }
// See comment above, where Instanceof is defined.
bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {}; class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {}; class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
@@ -324,7 +304,7 @@ class FastNewContextStub : public CodeStub {
static const int kMaximumSlots = 64; static const int kMaximumSlots = 64;
explicit FastNewContextStub(int slots) : slots_(slots) { explicit FastNewContextStub(int slots) : slots_(slots) {
ASSERT(slots_ > 0 && slots_ <= kMaximumSlots); ASSERT(slots_ > 0 && slots <= kMaximumSlots);
} }
void Generate(MacroAssembler* masm); void Generate(MacroAssembler* masm);
@@ -337,24 +317,6 @@ class FastNewContextStub : public CodeStub {
}; };
class FastNewBlockContextStub : public CodeStub {
public:
static const int kMaximumSlots = 64;
explicit FastNewBlockContextStub(int slots) : slots_(slots) {
ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
}
void Generate(MacroAssembler* masm);
private:
int slots_;
Major MajorKey() { return FastNewBlockContext; }
int MinorKey() { return slots_; }
};
class FastCloneShallowArrayStub : public CodeStub { class FastCloneShallowArrayStub : public CodeStub {
public: public:
// Maximum length of copied elements array. // Maximum length of copied elements array.
@@ -569,18 +531,11 @@ class CompareStub: public CodeStub {
class CEntryStub : public CodeStub { class CEntryStub : public CodeStub {
public: public:
explicit CEntryStub(int result_size, explicit CEntryStub(int result_size)
SaveFPRegsMode save_doubles = kDontSaveFPRegs) : result_size_(result_size), save_doubles_(false) { }
: result_size_(result_size), save_doubles_(save_doubles) { }
void Generate(MacroAssembler* masm); void Generate(MacroAssembler* masm);
void SaveDoubles() { save_doubles_ = true; }
// The version of this stub that doesn't save doubles is generated ahead of
// time, so it's OK to call it from other stubs that can't cope with GC during
// their code generation. On machines that always have gp registers (x64) we
// can generate both variants ahead of time.
virtual bool IsPregenerated();
static void GenerateAheadOfTime();
private: private:
void GenerateCore(MacroAssembler* masm, void GenerateCore(MacroAssembler* masm,
@@ -595,7 +550,7 @@ class CEntryStub : public CodeStub {
// Number of pointers/values returned. // Number of pointers/values returned.
const int result_size_; const int result_size_;
SaveFPRegsMode save_doubles_; bool save_doubles_;
Major MajorKey() { return CEntry; } Major MajorKey() { return CEntry; }
int MinorKey(); int MinorKey();
@@ -692,32 +647,10 @@ class CallFunctionStub: public CodeStub {
void Generate(MacroAssembler* masm); void Generate(MacroAssembler* masm);
virtual void FinishCode(Code* code);
static void Clear(Heap* heap, Address address);
static Object* GetCachedValue(Address address);
static int ExtractArgcFromMinorKey(int minor_key) { static int ExtractArgcFromMinorKey(int minor_key) {
return ArgcBits::decode(minor_key); return ArgcBits::decode(minor_key);
} }
// The object that indicates an uninitialized cache.
static Handle<Object> UninitializedSentinel(Isolate* isolate) {
return isolate->factory()->the_hole_value();
}
// A raw version of the uninitialized sentinel that's safe to read during
// garbage collection (e.g., for patching the cache).
static Object* RawUninitializedSentinel(Heap* heap) {
return heap->raw_unchecked_the_hole_value();
}
// The object that indicates a megamorphic state.
static Handle<Object> MegamorphicSentinel(Isolate* isolate) {
return isolate->factory()->undefined_value();
}
private: private:
int argc_; int argc_;
CallFunctionFlags flags_; CallFunctionFlags flags_;
@@ -725,8 +658,8 @@ class CallFunctionStub: public CodeStub {
virtual void PrintName(StringStream* stream); virtual void PrintName(StringStream* stream);
// Minor key encoding in 32 bits with Bitfield <Type, shift, size>. // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
class FlagBits: public BitField<CallFunctionFlags, 0, 2> {}; class FlagBits: public BitField<CallFunctionFlags, 0, 1> {};
class ArgcBits: public BitField<unsigned, 2, 32 - 2> {}; class ArgcBits: public BitField<unsigned, 1, 32 - 1> {};
Major MajorKey() { return CallFunction; } Major MajorKey() { return CallFunction; }
int MinorKey() { int MinorKey() {
@@ -737,10 +670,6 @@ class CallFunctionStub: public CodeStub {
bool ReceiverMightBeImplicit() { bool ReceiverMightBeImplicit() {
return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0; return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
} }
bool RecordCallTarget() {
return (flags_ & RECORD_CALL_TARGET) != 0;
}
}; };
@@ -1005,8 +934,6 @@ class ToBooleanStub: public CodeStub {
virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; } virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; }
virtual void PrintName(StringStream* stream); virtual void PrintName(StringStream* stream);
virtual bool SometimesSetsUpAFrame() { return false; }
private: private:
Major MajorKey() { return ToBoolean; } Major MajorKey() { return ToBoolean; }
int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); } int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }

2
deps/v8/src/codegen.cc

@@ -218,8 +218,8 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
int CEntryStub::MinorKey() { int CEntryStub::MinorKey() {
int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0;
ASSERT(result_size_ == 1 || result_size_ == 2); ASSERT(result_size_ == 1 || result_size_ == 2);
int result = save_doubles_ ? 1 : 0;
#ifdef _WIN64 #ifdef _WIN64
return result | ((result_size_ == 1) ? 0 : 2); return result | ((result_size_ == 1) ? 0 : 2);
#else #else

77
deps/v8/src/compiler-intrinsics.h

@@ -1,77 +0,0 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_COMPILER_INTRINSICS_H_
#define V8_COMPILER_INTRINSICS_H_
namespace v8 {
namespace internal {
class CompilerIntrinsics {
public:
// Returns number of zero bits preceding least significant 1 bit.
// Undefined for zero value.
INLINE(static int CountTrailingZeros(uint32_t value));
// Returns number of zero bits following most significant 1 bit.
// Undefined for zero value.
INLINE(static int CountLeadingZeros(uint32_t value));
};
#ifdef __GNUC__
int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
return __builtin_ctz(value);
}
int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
return __builtin_clz(value);
}
#elif defined(_MSC_VER)
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
unsigned long result; //NOLINT
_BitScanForward(&result, static_cast<long>(value)); //NOLINT
return static_cast<int>(result);
}
int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
unsigned long result; //NOLINT
_BitScanReverse(&result, static_cast<long>(value)); //NOLINT
return 31 - static_cast<int>(result);
}
#else
#error Unsupported compiler
#endif
} } // namespace v8::internal
#endif // V8_COMPILER_INTRINSICS_H_

3
deps/v8/src/compiler.cc

@@ -36,7 +36,6 @@
#include "full-codegen.h" #include "full-codegen.h"
#include "gdb-jit.h" #include "gdb-jit.h"
#include "hydrogen.h" #include "hydrogen.h"
#include "isolate-inl.h"
#include "lithium.h" #include "lithium.h"
#include "liveedit.h" #include "liveedit.h"
#include "parser.h" #include "parser.h"
@@ -276,7 +275,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
} }
Handle<Context> global_context(info->closure()->context()->global_context()); Handle<Context> global_context(info->closure()->context()->global_context());
TypeFeedbackOracle oracle(code, global_context, info->isolate()); TypeFeedbackOracle oracle(code, global_context);
HGraphBuilder builder(info, &oracle); HGraphBuilder builder(info, &oracle);
HPhase phase(HPhase::kTotal); HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph(); HGraph* graph = builder.CreateGraph();

100
deps/v8/src/contexts.cc

@@ -86,14 +86,14 @@ void Context::set_global_proxy(JSObject* object) {
Handle<Object> Context::Lookup(Handle<String> name, Handle<Object> Context::Lookup(Handle<String> name,
ContextLookupFlags flags, ContextLookupFlags flags,
int* index, int* index_,
PropertyAttributes* attributes, PropertyAttributes* attributes,
BindingFlags* binding_flags) { BindingFlags* binding_flags) {
Isolate* isolate = GetIsolate(); Isolate* isolate = GetIsolate();
Handle<Context> context(this, isolate); Handle<Context> context(this, isolate);
bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0; bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
*index = -1; *index_ = -1;
*attributes = ABSENT; *attributes = ABSENT;
*binding_flags = MISSING_BINDING; *binding_flags = MISSING_BINDING;
@@ -110,50 +110,70 @@ Handle<Object> Context::Lookup(Handle<String> name,
PrintF("\n"); PrintF("\n");
} }
// 1. Check global objects, subjects of with, and extension objects. // Check extension/with/global object.
if (context->IsGlobalContext() || if (!context->IsBlockContext() && context->has_extension()) {
context->IsWithContext() || if (context->IsCatchContext()) {
(context->IsFunctionContext() && context->has_extension())) { // Catch contexts have the variable name in the extension slot.
Handle<JSObject> object(JSObject::cast(context->extension()), isolate); if (name->Equals(String::cast(context->extension()))) {
// Context extension objects needs to behave as if they have no if (FLAG_trace_contexts) {
// prototype. So even if we want to follow prototype chains, we need PrintF("=> found in catch context\n");
// to only do a local lookup for context extension objects. }
if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 || *index_ = Context::THROWN_OBJECT_INDEX;
object->IsJSContextExtensionObject()) { *attributes = NONE;
*attributes = object->GetLocalPropertyAttribute(*name); *binding_flags = MUTABLE_IS_INITIALIZED;
return context;
}
} else { } else {
*attributes = object->GetPropertyAttribute(*name); ASSERT(context->IsGlobalContext() ||
} context->IsFunctionContext() ||
if (*attributes != ABSENT) { context->IsWithContext());
if (FLAG_trace_contexts) { // Global, function, and with contexts may have an object in the
PrintF("=> found property in context object %p\n", // extension slot.
reinterpret_cast<void*>(*object)); Handle<JSObject> extension(JSObject::cast(context->extension()),
isolate);
// Context extension objects needs to behave as if they have no
// prototype. So even if we want to follow prototype chains, we
// need to only do a local lookup for context extension objects.
if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
extension->IsJSContextExtensionObject()) {
*attributes = extension->GetLocalPropertyAttribute(*name);
} else {
*attributes = extension->GetPropertyAttribute(*name);
}
if (*attributes != ABSENT) {
// property found
if (FLAG_trace_contexts) {
PrintF("=> found property in context object %p\n",
reinterpret_cast<void*>(*extension));
}
return extension;
} }
return object;
} }
} }
// 2. Check the context proper if it has slots. // Check serialized scope information of functions and blocks. Only
// functions can have parameters, and a function name.
if (context->IsFunctionContext() || context->IsBlockContext()) { if (context->IsFunctionContext() || context->IsBlockContext()) {
// Use serialized scope information of functions and blocks to search // We may have context-local slots. Check locals in the context.
// for the context index.
Handle<SerializedScopeInfo> scope_info; Handle<SerializedScopeInfo> scope_info;
if (context->IsFunctionContext()) { if (context->IsFunctionContext()) {
scope_info = Handle<SerializedScopeInfo>( scope_info = Handle<SerializedScopeInfo>(
context->closure()->shared()->scope_info(), isolate); context->closure()->shared()->scope_info(), isolate);
} else { } else {
ASSERT(context->IsBlockContext());
scope_info = Handle<SerializedScopeInfo>( scope_info = Handle<SerializedScopeInfo>(
SerializedScopeInfo::cast(context->extension()), isolate); SerializedScopeInfo::cast(context->extension()), isolate);
} }
Variable::Mode mode; Variable::Mode mode;
int slot_index = scope_info->ContextSlotIndex(*name, &mode); int index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS); ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
if (slot_index >= 0) { if (index >= 0) {
if (FLAG_trace_contexts) { if (FLAG_trace_contexts) {
PrintF("=> found local in context slot %d (mode = %d)\n", PrintF("=> found local in context slot %d (mode = %d)\n",
slot_index, mode); index, mode);
} }
*index = slot_index; *index_ = index;
// Note: Fixed context slots are statically allocated by the compiler. // Note: Fixed context slots are statically allocated by the compiler.
// Statically allocated variables always have a statically known mode, // Statically allocated variables always have a statically known mode,
// which is the mode with which they were declared when added to the // which is the mode with which they were declared when added to the
@@ -186,34 +206,22 @@ Handle<Object> Context::Lookup(Handle<String> name,
// Check the slot corresponding to the intermediate context holding // Check the slot corresponding to the intermediate context holding
// only the function name variable. // only the function name variable.
if (follow_context_chain && context->IsFunctionContext()) { if (follow_context_chain) {
int function_index = scope_info->FunctionContextSlotIndex(*name); int index = scope_info->FunctionContextSlotIndex(*name);
if (function_index >= 0) { if (index >= 0) {
if (FLAG_trace_contexts) { if (FLAG_trace_contexts) {
PrintF("=> found intermediate function in context slot %d\n", PrintF("=> found intermediate function in context slot %d\n",
function_index); index);
} }
*index = function_index; *index_ = index;
*attributes = READ_ONLY; *attributes = READ_ONLY;
*binding_flags = IMMUTABLE_IS_INITIALIZED; *binding_flags = IMMUTABLE_IS_INITIALIZED;
return context; return context;
} }
} }
} else if (context->IsCatchContext()) {
// Catch contexts have the variable name in the extension slot.
if (name->Equals(String::cast(context->extension()))) {
if (FLAG_trace_contexts) {
PrintF("=> found in catch context\n");
}
*index = Context::THROWN_OBJECT_INDEX;
*attributes = NONE;
*binding_flags = MUTABLE_IS_INITIALIZED;
return context;
}
} }
// 3. Prepare to continue with the previous (next outermost) context. // Proceed with the previous context.
if (context->IsGlobalContext()) { if (context->IsGlobalContext()) {
follow_context_chain = false; follow_context_chain = false;
} else { } else {

41
deps/v8/src/contexts.h

@@ -134,8 +134,6 @@ enum BindingFlags {
V(MAP_CACHE_INDEX, Object, map_cache) \ V(MAP_CACHE_INDEX, Object, map_cache) \
V(CONTEXT_DATA_INDEX, Object, data) \ V(CONTEXT_DATA_INDEX, Object, data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \ V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \ V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \ V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
@@ -254,7 +252,6 @@ class Context: public FixedArray {
OUT_OF_MEMORY_INDEX, OUT_OF_MEMORY_INDEX,
CONTEXT_DATA_INDEX, CONTEXT_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX, ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX, DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX, DERIVED_GET_TRAP_INDEX,
DERIVED_SET_TRAP_INDEX, DERIVED_SET_TRAP_INDEX,
@@ -333,6 +330,12 @@ class Context: public FixedArray {
// Mark the global context with out of memory. // Mark the global context with out of memory.
inline void mark_out_of_memory(); inline void mark_out_of_memory();
// The exception holder is the object used as a with object in
// the implementation of a catch block.
bool is_exception_holder(Object* object) {
return IsCatchContext() && extension() == object;
}
// A global context hold a list of all functions which have been optimized. // A global context hold a list of all functions which have been optimized.
void AddOptimizedFunction(JSFunction* function); void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function); void RemoveOptimizedFunction(JSFunction* function);
@@ -352,25 +355,29 @@ class Context: public FixedArray {
#undef GLOBAL_CONTEXT_FIELD_ACCESSORS #undef GLOBAL_CONTEXT_FIELD_ACCESSORS
// Lookup the the slot called name, starting with the current context. // Lookup the the slot called name, starting with the current context.
// There are three possibilities: // There are 4 possible outcomes:
//
// 1) index_ >= 0 && result->IsContext():
// most common case, the result is a Context, and index is the
// context slot index, and the slot exists.
// attributes == READ_ONLY for the function name variable, NONE otherwise.
// //
// 1) result->IsContext(): // 2) index_ >= 0 && result->IsJSObject():
// The binding was found in a context. *index is always the // the result is the JSObject arguments object, the index is the parameter
// non-negative slot index. *attributes is NONE for var and let // index, i.e., key into the arguments object, and the property exists.
// declarations, READ_ONLY for const declarations (never ABSENT). // attributes != ABSENT.
// //
// 2) result->IsJSObject(): // 3) index_ < 0 && result->IsJSObject():
// The binding was found as a named property in a context extension // the result is the JSObject extension context or the global object,
// object (i.e., was introduced via eval), as a property on the subject // and the name is the property name, and the property exists.
// of with, or as a property of the global object. *index is -1 and // attributes != ABSENT.
// *attributes is not ABSENT.
// //
// 3) result.is_null(): // 4) index_ < 0 && result.is_null():
// There was no binding found, *index is always -1 and *attributes is // there was no context found with the corresponding property.
// always ABSENT. // attributes == ABSENT.
Handle<Object> Lookup(Handle<String> name, Handle<Object> Lookup(Handle<String> name,
ContextLookupFlags flags, ContextLookupFlags flags,
int* index, int* index_,
PropertyAttributes* attributes, PropertyAttributes* attributes,
BindingFlags* binding_flags); BindingFlags* binding_flags);

2
deps/v8/src/conversions-inl.h

@@ -47,7 +47,7 @@ namespace v8 {
namespace internal { namespace internal {
static inline double JunkStringValue() { static inline double JunkStringValue() {
return BitCast<double, uint64_t>(kQuietNaNMask); return std::numeric_limits<double>::quiet_NaN();
} }

2
deps/v8/src/conversions.h

@@ -28,6 +28,8 @@
#ifndef V8_CONVERSIONS_H_ #ifndef V8_CONVERSIONS_H_
#define V8_CONVERSIONS_H_ #define V8_CONVERSIONS_H_
#include <limits>
#include "utils.h" #include "utils.h"
namespace v8 { namespace v8 {

2
deps/v8/src/cpu-profiler.cc

@@ -551,12 +551,12 @@ void CpuProfiler::StopProcessor() {
sampler->Stop(); sampler->Stop();
need_to_stop_sampler_ = false; need_to_stop_sampler_ = false;
} }
NoBarrier_Store(&is_profiling_, false);
processor_->Stop(); processor_->Stop();
processor_->Join(); processor_->Join();
delete processor_; delete processor_;
delete generator_; delete generator_;
processor_ = NULL; processor_ = NULL;
NoBarrier_Store(&is_profiling_, false);
generator_ = NULL; generator_ = NULL;
logger->logging_nesting_ = saved_logging_nesting_; logger->logging_nesting_ = saved_logging_nesting_;
} }

5
deps/v8/src/d8-debug.cc

@@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved. // Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef ENABLE_DEBUGGER_SUPPORT
#include "d8.h" #include "d8.h"
#include "d8-debug.h" #include "d8-debug.h"
@@ -368,5 +367,3 @@ void KeyboardThread::Run() {
} // namespace v8 } // namespace v8
#endif // ENABLE_DEBUGGER_SUPPORT

34
deps/v8/src/d8.cc

@@ -146,11 +146,11 @@ bool Shell::ExecuteString(Handle<String> source,
Handle<Value> name, Handle<Value> name,
bool print_result, bool print_result,
bool report_exceptions) { bool report_exceptions) {
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) #ifndef V8_SHARED
bool FLAG_debugger = i::FLAG_debugger; bool FLAG_debugger = i::FLAG_debugger;
#else #else
bool FLAG_debugger = false; bool FLAG_debugger = false;
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT #endif // V8_SHARED
HandleScope handle_scope; HandleScope handle_scope;
TryCatch try_catch; TryCatch try_catch;
options.script_executed = true; options.script_executed = true;
@@ -594,7 +594,6 @@ void Shell::InstallUtilityScript() {
Context::Scope utility_scope(utility_context_); Context::Scope utility_scope(utility_context_);
#ifdef ENABLE_DEBUGGER_SUPPORT #ifdef ENABLE_DEBUGGER_SUPPORT
if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
// Install the debugger object in the utility scope // Install the debugger object in the utility scope
i::Debug* debug = i::Isolate::Current()->debug(); i::Debug* debug = i::Isolate::Current()->debug();
debug->Load(); debug->Load();
@@ -817,7 +816,7 @@ void Shell::OnExit() {
static FILE* FOpen(const char* path, const char* mode) { static FILE* FOpen(const char* path, const char* mode) {
#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64)) #if (defined(_WIN32) || defined(_WIN64))
FILE* result; FILE* result;
if (fopen_s(&result, path, mode) == 0) { if (fopen_s(&result, path, mode) == 0) {
return result; return result;
@@ -901,6 +900,9 @@ void Shell::RunShell() {
#ifndef V8_SHARED #ifndef V8_SHARED
console = LineEditor::Get(); console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name()); printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
if (i::FLAG_debugger) {
printf("JavaScript debugger enabled\n");
}
console->Open(); console->Open();
while (true) { while (true) {
i::SmartArrayPointer<char> input = console->Prompt(Shell::kPrompt); i::SmartArrayPointer<char> input = console->Prompt(Shell::kPrompt);
@@ -1251,22 +1253,14 @@ int Shell::RunMain(int argc, char* argv[]) {
Locker lock; Locker lock;
HandleScope scope; HandleScope scope;
Persistent<Context> context = CreateEvaluationContext(); Persistent<Context> context = CreateEvaluationContext();
if (options.last_run) {
// Keep using the same context in the interactive shell.
evaluation_context_ = context;
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
// If the interactive debugger is enabled make sure to activate
// it before running the files passed on the command line.
if (i::FLAG_debugger) {
InstallUtilityScript();
}
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
}
{ {
Context::Scope cscope(context); Context::Scope cscope(context);
options.isolate_sources[0].Execute(); options.isolate_sources[0].Execute();
} }
if (!options.last_run) { if (options.last_run) {
// Keep using the same context in the interactive shell
evaluation_context_ = context;
} else {
context.Dispose(); context.Dispose();
} }
@@ -1337,11 +1331,9 @@ int Shell::Main(int argc, char* argv[]) {
if (( options.interactive_shell if (( options.interactive_shell
|| !options.script_executed ) || !options.script_executed )
&& !options.test_shell ) { && !options.test_shell ) {
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) #ifndef V8_SHARED
if (!i::FLAG_debugger) { InstallUtilityScript();
InstallUtilityScript(); #endif // V8_SHARED
}
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
RunShell(); RunShell();
} }

226
deps/v8/src/debug.cc

@@ -40,7 +40,6 @@
#include "global-handles.h" #include "global-handles.h"
#include "ic.h" #include "ic.h"
#include "ic-inl.h" #include "ic-inl.h"
#include "isolate-inl.h"
#include "list.h" #include "list.h"
#include "messages.h" #include "messages.h"
#include "natives.h" #include "natives.h"
@@ -402,15 +401,15 @@ void BreakLocationIterator::PrepareStepIn() {
// Step in can only be prepared if currently positioned on an IC call, // Step in can only be prepared if currently positioned on an IC call,
// construct call or CallFunction stub call. // construct call or CallFunction stub call.
Address target = rinfo()->target_address(); Address target = rinfo()->target_address();
Handle<Code> target_code(Code::GetCodeFromTargetAddress(target)); Handle<Code> code(Code::GetCodeFromTargetAddress(target));
if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) { if (code->is_call_stub() || code->is_keyed_call_stub()) {
// Step in through IC call is handled by the runtime system. Therefore make // Step in through IC call is handled by the runtime system. Therefore make
// sure that the any current IC is cleared and the runtime system is // sure that the any current IC is cleared and the runtime system is
// called. If the executing code has a debug break at the location change // called. If the executing code has a debug break at the location change
// the call in the original code as it is the code there that will be // the call in the original code as it is the code there that will be
// executed in place of the debug break call. // executed in place of the debug break call.
Handle<Code> stub = ComputeCallDebugPrepareStepIn( Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count(),
target_code->arguments_count(), target_code->kind()); code->kind());
if (IsDebugBreak()) { if (IsDebugBreak()) {
original_rinfo()->set_target_address(stub->entry()); original_rinfo()->set_target_address(stub->entry());
} else { } else {
@@ -420,7 +419,7 @@ void BreakLocationIterator::PrepareStepIn() {
#ifdef DEBUG #ifdef DEBUG
// All the following stuff is needed only for assertion checks so the code // All the following stuff is needed only for assertion checks so the code
// is wrapped in ifdef. // is wrapped in ifdef.
Handle<Code> maybe_call_function_stub = target_code; Handle<Code> maybe_call_function_stub = code;
if (IsDebugBreak()) { if (IsDebugBreak()) {
Address original_target = original_rinfo()->target_address(); Address original_target = original_rinfo()->target_address();
maybe_call_function_stub = maybe_call_function_stub =
@@ -437,9 +436,8 @@ void BreakLocationIterator::PrepareStepIn() {
// Step in through CallFunction stub should also be prepared by caller of // Step in through CallFunction stub should also be prepared by caller of
// this function (Debug::PrepareStep) which should flood target function // this function (Debug::PrepareStep) which should flood target function
// with breakpoints. // with breakpoints.
ASSERT(RelocInfo::IsConstructCall(rmode()) || ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub()
target_code->is_inline_cache_stub() || || is_call_function_stub);
is_call_function_stub);
#endif #endif
} }
} }
@@ -476,11 +474,11 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
RelocInfo::Mode mode = rmode(); RelocInfo::Mode mode = rmode();
if (RelocInfo::IsCodeTarget(mode)) { if (RelocInfo::IsCodeTarget(mode)) {
Address target = rinfo()->target_address(); Address target = rinfo()->target_address();
Handle<Code> target_code(Code::GetCodeFromTargetAddress(target)); Handle<Code> code(Code::GetCodeFromTargetAddress(target));
// Patch the code to invoke the builtin debug break function matching the // Patch the code to invoke the builtin debug break function matching the
// calling convention used by the call site. // calling convention used by the call site.
Handle<Code> dbgbrk_code(Debug::FindDebugBreak(target_code, mode)); Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
rinfo()->set_target_address(dbgbrk_code->entry()); rinfo()->set_target_address(dbgbrk_code->entry());
} }
} }
@@ -774,7 +772,7 @@ bool Debug::CompileDebuggerScript(int index) {
// Execute the shared function in the debugger context. // Execute the shared function in the debugger context.
Handle<Context> context = isolate->global_context(); Handle<Context> context = isolate->global_context();
bool caught_exception; bool caught_exception = false;
Handle<JSFunction> function = Handle<JSFunction> function =
factory->NewFunctionFromSharedFunctionInfo(function_info, context); factory->NewFunctionFromSharedFunctionInfo(function_info, context);
@@ -1105,13 +1103,14 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id()); Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
// Call HandleBreakPointx. // Call HandleBreakPointx.
bool caught_exception; bool caught_exception = false;
Handle<Object> argv[] = { break_id, break_point_object }; const int argc = 2;
Object** argv[argc] = {
break_id.location(),
reinterpret_cast<Object**>(break_point_object.location())
};
Handle<Object> result = Execution::TryCall(check_break_point, Handle<Object> result = Execution::TryCall(check_break_point,
isolate_->js_builtins_object(), isolate_->js_builtins_object(), argc, argv, &caught_exception);
ARRAY_SIZE(argv),
argv,
&caught_exception);
// If exception or non boolean result handle as not triggered // If exception or non boolean result handle as not triggered
if (caught_exception || !result->IsBoolean()) { if (caught_exception || !result->IsBoolean()) {
@@ -1733,10 +1732,6 @@ void Debug::PrepareForBreakPoints() {
if (!has_break_points_) { if (!has_break_points_) {
Deoptimizer::DeoptimizeAll(); Deoptimizer::DeoptimizeAll();
// We are going to iterate heap to find all functions without
// debug break slots.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
AssertNoAllocation no_allocation; AssertNoAllocation no_allocation;
Builtins* builtins = isolate_->builtins(); Builtins* builtins = isolate_->builtins();
Code* lazy_compile = builtins->builtin(Builtins::kLazyCompile); Code* lazy_compile = builtins->builtin(Builtins::kLazyCompile);
@@ -2002,10 +1997,9 @@ void Debug::CreateScriptCache() {
// Perform two GCs to get rid of all unreferenced scripts. The first GC gets // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
// rid of all the cached script wrappers and the second gets rid of the // rid of all the cached script wrappers and the second gets rid of the
// scripts which are no longer referenced. The second also sweeps precisely, // scripts which are no longer referenced.
// which saves us doing yet another GC to make the heap iterable. heap->CollectAllGarbage(false);
heap->CollectAllGarbage(Heap::kNoGCFlags); heap->CollectAllGarbage(false);
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);
ASSERT(script_cache_ == NULL); ASSERT(script_cache_ == NULL);
script_cache_ = new ScriptCache(); script_cache_ = new ScriptCache();
@@ -2013,8 +2007,6 @@ void Debug::CreateScriptCache() {
// Scan heap for Script objects. // Scan heap for Script objects.
int count = 0; int count = 0;
HeapIterator iterator; HeapIterator iterator;
AssertNoAllocation no_allocation;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsScript() && Script::cast(obj)->HasValidSource()) { if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
script_cache_->Add(Handle<Script>(Script::cast(obj))); script_cache_->Add(Handle<Script>(Script::cast(obj)));
@@ -2055,7 +2047,7 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
// Perform GC to get unreferenced scripts evicted from the cache before // Perform GC to get unreferenced scripts evicted from the cache before
// returning the content. // returning the content.
isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags); isolate_->heap()->CollectAllGarbage(false);
// Get the scripts from the cache. // Get the scripts from the cache.
return script_cache_->GetScripts(); return script_cache_->GetScripts();
@@ -2101,8 +2093,7 @@ Debugger::~Debugger() {
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name, Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
int argc, int argc, Object*** argv,
Handle<Object> argv[],
bool* caught_exception) { bool* caught_exception) {
ASSERT(isolate_->context() == *isolate_->debug()->debug_context()); ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
@@ -2119,9 +2110,7 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
Handle<Object> js_object = Execution::TryCall( Handle<Object> js_object = Execution::TryCall(
Handle<JSFunction>::cast(constructor), Handle<JSFunction>::cast(constructor),
Handle<JSObject>(isolate_->debug()->debug_context()->global()), Handle<JSObject>(isolate_->debug()->debug_context()->global()),
argc, argc, argv, caught_exception);
argv,
caught_exception);
return js_object; return js_object;
} }
@@ -2130,11 +2119,10 @@ Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
// Create the execution state object. // Create the execution state object.
Handle<Object> break_id = isolate_->factory()->NewNumberFromInt( Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
isolate_->debug()->break_id()); isolate_->debug()->break_id());
Handle<Object> argv[] = { break_id }; const int argc = 1;
Object** argv[argc] = { break_id.location() };
return MakeJSObject(CStrVector("MakeExecutionState"), return MakeJSObject(CStrVector("MakeExecutionState"),
ARRAY_SIZE(argv), argc, argv, caught_exception);
argv,
caught_exception);
} }
@@ -2142,9 +2130,11 @@ Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state,
Handle<Object> break_points_hit, Handle<Object> break_points_hit,
bool* caught_exception) { bool* caught_exception) {
// Create the new break event object. // Create the new break event object.
Handle<Object> argv[] = { exec_state, break_points_hit }; const int argc = 2;
Object** argv[argc] = { exec_state.location(),
break_points_hit.location() };
return MakeJSObject(CStrVector("MakeBreakEvent"), return MakeJSObject(CStrVector("MakeBreakEvent"),
ARRAY_SIZE(argv), argc,
argv, argv,
caught_exception); caught_exception);
} }
@@ -2156,24 +2146,23 @@ Handle<Object> Debugger::MakeExceptionEvent(Handle<Object> exec_state,
bool* caught_exception) { bool* caught_exception) {
Factory* factory = isolate_->factory(); Factory* factory = isolate_->factory();
// Create the new exception event object. // Create the new exception event object.
Handle<Object> argv[] = { exec_state, const int argc = 3;
exception, Object** argv[argc] = { exec_state.location(),
factory->ToBoolean(uncaught) }; exception.location(),
uncaught ? factory->true_value().location() :
factory->false_value().location()};
return MakeJSObject(CStrVector("MakeExceptionEvent"), return MakeJSObject(CStrVector("MakeExceptionEvent"),
ARRAY_SIZE(argv), argc, argv, caught_exception);
argv,
caught_exception);
} }
Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function, Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
bool* caught_exception) { bool* caught_exception) {
// Create the new function event object. // Create the new function event object.
Handle<Object> argv[] = { function }; const int argc = 1;
Object** argv[argc] = { function.location() };
return MakeJSObject(CStrVector("MakeNewFunctionEvent"), return MakeJSObject(CStrVector("MakeNewFunctionEvent"),
ARRAY_SIZE(argv), argc, argv, caught_exception);
argv,
caught_exception);
} }
@@ -2184,11 +2173,14 @@ Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script,
// Create the compile event object. // Create the compile event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception); Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> script_wrapper = GetScriptWrapper(script); Handle<Object> script_wrapper = GetScriptWrapper(script);
Handle<Object> argv[] = { exec_state, const int argc = 3;
script_wrapper, Object** argv[argc] = { exec_state.location(),
factory->ToBoolean(before) }; script_wrapper.location(),
before ? factory->true_value().location() :
factory->false_value().location() };
return MakeJSObject(CStrVector("MakeCompileEvent"), return MakeJSObject(CStrVector("MakeCompileEvent"),
ARRAY_SIZE(argv), argc,
argv, argv,
caught_exception); caught_exception);
} }
@@ -2199,10 +2191,11 @@ Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
// Create the script collected event object. // Create the script collected event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception); Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id)); Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
Handle<Object> argv[] = { exec_state, id_object }; const int argc = 2;
Object** argv[argc] = { exec_state.location(), id_object.location() };
return MakeJSObject(CStrVector("MakeScriptCollectedEvent"), return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
ARRAY_SIZE(argv), argc,
argv, argv,
caught_exception); caught_exception);
} }
@@ -2352,13 +2345,12 @@ void Debugger::OnAfterCompile(Handle<Script> script,
Handle<JSValue> wrapper = GetScriptWrapper(script); Handle<JSValue> wrapper = GetScriptWrapper(script);
// Call UpdateScriptBreakPoints expect no exceptions. // Call UpdateScriptBreakPoints expect no exceptions.
bool caught_exception; bool caught_exception = false;
Handle<Object> argv[] = { wrapper }; const int argc = 1;
Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points), Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
Isolate::Current()->js_builtins_object(), Isolate::Current()->js_builtins_object(), argc, argv,
ARRAY_SIZE(argv), &caught_exception);
argv,
&caught_exception);
if (caught_exception) { if (caught_exception) {
return; return;
} }
@@ -2489,16 +2481,13 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_)); Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
// Invoke the JavaScript debug event listener. // Invoke the JavaScript debug event listener.
Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event)), const int argc = 4;
exec_state, Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
event_data, exec_state.location(),
event_listener_data_ }; Handle<Object>::cast(event_data).location(),
bool caught_exception; event_listener_data_.location() };
Execution::TryCall(fun, bool caught_exception = false;
isolate_->global(), Execution::TryCall(fun, isolate_->global(), argc, argv, &caught_exception);
ARRAY_SIZE(argv),
argv,
&caught_exception);
// Silently ignore exceptions from debug event listeners. // Silently ignore exceptions from debug event listeners.
} }
@@ -2867,11 +2856,12 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
return isolate_->factory()->undefined_value(); return isolate_->factory()->undefined_value();
} }
Handle<Object> argv[] = { exec_state, data }; static const int kArgc = 2;
Object** argv[kArgc] = { exec_state.location(), data.location() };
Handle<Object> result = Execution::Call( Handle<Object> result = Execution::Call(
fun, fun,
Handle<Object>(isolate_->debug()->debug_context_->global_proxy()), Handle<Object>(isolate_->debug()->debug_context_->global_proxy()),
ARRAY_SIZE(argv), kArgc,
argv, argv,
pending_exception); pending_exception);
return result; return result;
@@ -2939,94 +2929,6 @@ void Debugger::CallMessageDispatchHandler() {
} }
EnterDebugger::EnterDebugger()
: isolate_(Isolate::Current()),
prev_(isolate_->debug()->debugger_entry()),
it_(isolate_),
has_js_frames_(!it_.done()),
save_(isolate_) {
Debug* debug = isolate_->debug();
ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
// Link recursive debugger entry.
debug->set_debugger_entry(this);
// Store the previous break id and frame id.
break_id_ = debug->break_id();
break_frame_id_ = debug->break_frame_id();
// Create the new break info. If there is no JavaScript frames there is no
// break frame id.
if (has_js_frames_) {
debug->NewBreak(it_.frame()->id());
} else {
debug->NewBreak(StackFrame::NO_ID);
}
// Make sure that debugger is loaded and enter the debugger context.
load_failed_ = !debug->Load();
if (!load_failed_) {
// NOTE the member variable save which saves the previous context before
// this change.
isolate_->set_context(*debug->debug_context());
}
}
EnterDebugger::~EnterDebugger() {
ASSERT(Isolate::Current() == isolate_);
Debug* debug = isolate_->debug();
// Restore to the previous break state.
debug->SetBreak(break_frame_id_, break_id_);
// Check for leaving the debugger.
if (prev_ == NULL) {
// Clear mirror cache when leaving the debugger. Skip this if there is a
// pending exception as clearing the mirror cache calls back into
// JavaScript. This can happen if the v8::Debug::Call is used in which
// case the exception should end up in the calling code.
if (!isolate_->has_pending_exception()) {
// Try to avoid any pending debug break breaking in the clear mirror
// cache JavaScript code.
if (isolate_->stack_guard()->IsDebugBreak()) {
debug->set_interrupts_pending(DEBUGBREAK);
isolate_->stack_guard()->Continue(DEBUGBREAK);
}
debug->ClearMirrorCache();
}
// Request preemption and debug break when leaving the last debugger entry
// if any of these where recorded while debugging.
if (debug->is_interrupt_pending(PREEMPT)) {
// This re-scheduling of preemption is to avoid starvation in some
// debugging scenarios.
debug->clear_interrupt_pending(PREEMPT);
isolate_->stack_guard()->Preempt();
}
if (debug->is_interrupt_pending(DEBUGBREAK)) {
debug->clear_interrupt_pending(DEBUGBREAK);
isolate_->stack_guard()->DebugBreak();
}
// If there are commands in the queue when leaving the debugger request
// that these commands are processed.
if (isolate_->debugger()->HasCommands()) {
isolate_->stack_guard()->DebugCommand();
}
// If leaving the debugger with the debugger no longer active unload it.
if (!isolate_->debugger()->IsDebuggerActive()) {
isolate_->debugger()->UnloadDebugger();
}
}
// Leaving this debugger entry.
debug->set_debugger_entry(prev_);
}
MessageImpl MessageImpl::NewEvent(DebugEvent event, MessageImpl MessageImpl::NewEvent(DebugEvent event,
bool running, bool running,
Handle<JSObject> exec_state, Handle<JSObject> exec_state,

90
deps/v8/src/debug.h

@@ -705,8 +705,7 @@ class Debugger {
void DebugRequest(const uint16_t* json_request, int length); void DebugRequest(const uint16_t* json_request, int length);
Handle<Object> MakeJSObject(Vector<const char> constructor_name, Handle<Object> MakeJSObject(Vector<const char> constructor_name,
int argc, int argc, Object*** argv,
Handle<Object> argv[],
bool* caught_exception); bool* caught_exception);
Handle<Object> MakeExecutionState(bool* caught_exception); Handle<Object> MakeExecutionState(bool* caught_exception);
Handle<Object> MakeBreakEvent(Handle<Object> exec_state, Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
@@ -870,8 +869,91 @@ class Debugger {
// some reason could not be entered FailedToEnter will return true. // some reason could not be entered FailedToEnter will return true.
class EnterDebugger BASE_EMBEDDED { class EnterDebugger BASE_EMBEDDED {
public: public:
EnterDebugger(); EnterDebugger()
~EnterDebugger(); : isolate_(Isolate::Current()),
prev_(isolate_->debug()->debugger_entry()),
it_(isolate_),
has_js_frames_(!it_.done()),
save_(isolate_) {
Debug* debug = isolate_->debug();
ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
// Link recursive debugger entry.
debug->set_debugger_entry(this);
// Store the previous break id and frame id.
break_id_ = debug->break_id();
break_frame_id_ = debug->break_frame_id();
// Create the new break info. If there is no JavaScript frames there is no
// break frame id.
if (has_js_frames_) {
debug->NewBreak(it_.frame()->id());
} else {
debug->NewBreak(StackFrame::NO_ID);
}
// Make sure that debugger is loaded and enter the debugger context.
load_failed_ = !debug->Load();
if (!load_failed_) {
// NOTE the member variable save which saves the previous context before
// this change.
isolate_->set_context(*debug->debug_context());
}
}
~EnterDebugger() {
ASSERT(Isolate::Current() == isolate_);
Debug* debug = isolate_->debug();
// Restore to the previous break state.
debug->SetBreak(break_frame_id_, break_id_);
// Check for leaving the debugger.
if (prev_ == NULL) {
// Clear mirror cache when leaving the debugger. Skip this if there is a
// pending exception as clearing the mirror cache calls back into
// JavaScript. This can happen if the v8::Debug::Call is used in which
// case the exception should end up in the calling code.
if (!isolate_->has_pending_exception()) {
// Try to avoid any pending debug break breaking in the clear mirror
// cache JavaScript code.
if (isolate_->stack_guard()->IsDebugBreak()) {
debug->set_interrupts_pending(DEBUGBREAK);
isolate_->stack_guard()->Continue(DEBUGBREAK);
}
debug->ClearMirrorCache();
}
// Request preemption and debug break when leaving the last debugger entry
// if any of these where recorded while debugging.
if (debug->is_interrupt_pending(PREEMPT)) {
// This re-scheduling of preemption is to avoid starvation in some
// debugging scenarios.
debug->clear_interrupt_pending(PREEMPT);
isolate_->stack_guard()->Preempt();
}
if (debug->is_interrupt_pending(DEBUGBREAK)) {
debug->clear_interrupt_pending(DEBUGBREAK);
isolate_->stack_guard()->DebugBreak();
}
// If there are commands in the queue when leaving the debugger request
// that these commands are processed.
if (isolate_->debugger()->HasCommands()) {
isolate_->stack_guard()->DebugCommand();
}
// If leaving the debugger with the debugger no longer active unload it.
if (!isolate_->debugger()->IsDebuggerActive()) {
isolate_->debugger()->UnloadDebugger();
}
}
// Leaving this debugger entry.
debug->set_debugger_entry(prev_);
}
// Check whether the debugger could be entered. // Check whether the debugger could be entered.
inline bool FailedToEnter() { return load_failed_; } inline bool FailedToEnter() { return load_failed_; }
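For orientation (an illustrative sketch, not part of the diff): EnterDebugger is a stack-allocated scope object, so debugger entry and exit follow C++ scope. A hypothetical caller, using only the members shown above, looks roughly like this:

static void HandleDebugEventSketch() {  // hypothetical helper name
  // Constructor links this debugger entry, records the break state and
  // switches to the debug context (see the inline constructor above).
  EnterDebugger debugger;
  if (debugger.FailedToEnter()) return;  // debugger scripts failed to load
  // ... inspect frames and dispatch debug events here ...
}  // Destructor restores the break state and re-posts pending interrupts.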

70
deps/v8/src/deoptimizer.cc

@@ -52,13 +52,11 @@ DeoptimizerData::DeoptimizerData() {
 DeoptimizerData::~DeoptimizerData() {
   if (eager_deoptimization_entry_code_ != NULL) {
-    Isolate::Current()->memory_allocator()->Free(
-        eager_deoptimization_entry_code_);
+    eager_deoptimization_entry_code_->Free(EXECUTABLE);
     eager_deoptimization_entry_code_ = NULL;
   }
   if (lazy_deoptimization_entry_code_ != NULL) {
-    Isolate::Current()->memory_allocator()->Free(
-        lazy_deoptimization_entry_code_);
+    lazy_deoptimization_entry_code_->Free(EXECUTABLE);
     lazy_deoptimization_entry_code_ = NULL;
   }
 }
@@ -73,8 +71,6 @@ void DeoptimizerData::Iterate(ObjectVisitor* v) {
 #endif
 
-// We rely on this function not causing a GC. It is called from generated code
-// without having a real stack frame in place.
 Deoptimizer* Deoptimizer::New(JSFunction* function,
                               BailoutType type,
                               unsigned bailout_id,
@@ -323,8 +319,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
       input_(NULL),
       output_count_(0),
       output_(NULL),
-      frame_alignment_marker_(isolate->heap()->frame_alignment_marker()),
-      has_alignment_padding_(0),
       deferred_heap_numbers_(0) {
   if (FLAG_trace_deopt && type != OSR) {
     if (type == DEBUGGER) {
@@ -349,26 +343,6 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
   if (type == EAGER) {
     ASSERT(from == NULL);
     optimized_code_ = function_->code();
-    if (FLAG_trace_deopt && FLAG_code_comments) {
-      // Print instruction associated with this bailout.
-      const char* last_comment = NULL;
-      int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
-          | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-      for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
-        RelocInfo* info = it.rinfo();
-        if (info->rmode() == RelocInfo::COMMENT) {
-          last_comment = reinterpret_cast<const char*>(info->data());
-        }
-        if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
-          unsigned id = Deoptimizer::GetDeoptimizationId(
-              info->target_address(), Deoptimizer::EAGER);
-          if (id == bailout_id && last_comment != NULL) {
-            PrintF(" %s\n", last_comment);
-            break;
-          }
-        }
-      }
-    }
   } else if (type == LAZY) {
     optimized_code_ = FindDeoptimizingCodeFromAddress(from);
     ASSERT(optimized_code_ != NULL);
@@ -412,7 +386,7 @@ void Deoptimizer::DeleteFrameDescriptions() {
 Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
   ASSERT(id >= 0);
   if (id >= kNumberOfEntries) return NULL;
-  MemoryChunk* base = NULL;
+  LargeObjectChunk* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     if (data->eager_deoptimization_entry_code_ == NULL) {
@@ -426,12 +400,12 @@ Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
     base = data->lazy_deoptimization_entry_code_;
   }
   return
-      static_cast<Address>(base->body()) + (id * table_entry_size_);
+      static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
 }
 
 
 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
-  MemoryChunk* base = NULL;
+  LargeObjectChunk* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     base = data->eager_deoptimization_entry_code_;
@@ -439,14 +413,14 @@ int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
     base = data->lazy_deoptimization_entry_code_;
   }
   if (base == NULL ||
-      addr < base->body() ||
-      addr >= base->body() +
+      addr < base->GetStartAddress() ||
+      addr >= base->GetStartAddress() +
           (kNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
   ASSERT_EQ(0,
-      static_cast<int>(addr - base->body()) % table_entry_size_);
-  return static_cast<int>(addr - base->body()) / table_entry_size_;
+      static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
+  return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_;
 }
@@ -488,8 +462,6 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
 }
 
-// We rely on this function not causing a GC. It is called from generated code
-// without having a real stack frame in place.
 void Deoptimizer::DoComputeOutputFrames() {
   if (bailout_type_ == OSR) {
     DoComputeOsrOutputFrame();
@@ -641,13 +613,11 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
       intptr_t input_value = input_->GetRegister(input_reg);
       if (FLAG_trace_deopt) {
         PrintF(
-            " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
+            " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
             output_[frame_index]->GetTop() + output_offset,
             output_offset,
             input_value,
            converter.NameOfCPURegister(input_reg));
-        reinterpret_cast<Object*>(input_value)->ShortPrint();
-        PrintF("\n");
       }
       output_[frame_index]->SetFrameSlot(output_offset, input_value);
       return;
@@ -705,12 +675,10 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
       if (FLAG_trace_deopt) {
         PrintF(" 0x%08" V8PRIxPTR ": ",
               output_[frame_index]->GetTop() + output_offset);
-        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
+        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
               output_offset,
               input_value,
               input_offset);
-        reinterpret_cast<Object*>(input_value)->ShortPrint();
-        PrintF("\n");
       }
       output_[frame_index]->SetFrameSlot(output_offset, input_value);
       return;
@@ -985,10 +953,7 @@ void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
   for (uint32_t i = 0; i < table_length; ++i) {
     uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
     Address pc_after = unoptimized_code->instruction_start() + pc_offset;
-    PatchStackCheckCodeAt(unoptimized_code,
-                          pc_after,
-                          check_code,
-                          replacement_code);
+    PatchStackCheckCodeAt(pc_after, check_code, replacement_code);
     stack_check_cursor += 2 * kIntSize;
   }
 }
@@ -1074,7 +1039,7 @@ void Deoptimizer::AddDoubleValue(intptr_t slot_address,
 }
 
-MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
+LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
   // We cannot run this if the serializer is enabled because this will
   // cause us to emit relocation information for the external
   // references. This is fine because the deoptimizer's code section
@@ -1088,15 +1053,12 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
   masm.GetCode(&desc);
   ASSERT(desc.reloc_size == 0);
 
-  MemoryChunk* chunk =
-      Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
-                                                            EXECUTABLE,
-                                                            NULL);
+  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
   if (chunk == NULL) {
     V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
   }
-  memcpy(chunk->body(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(chunk->body(), desc.instr_size);
+  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
   return chunk;
 }
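A small standalone sketch of the table addressing that GetDeoptimizationEntry and GetDeoptimizationId implement above (the constants are placeholders, not V8's real values): the deoptimization table is a single executable chunk holding kNumberOfEntries fixed-size stubs, and the two functions are inverse mappings between an entry id and its address.

#include <cassert>
#include <cstdint>

typedef uint8_t* Address;

static const int kNumberOfEntries = 4096;  // placeholder value
static const int kTableEntrySize = 10;     // placeholder for table_entry_size_

// id -> address (mirrors GetDeoptimizationEntry).
Address EntryForId(Address base, int id) {
  assert(id >= 0 && id < kNumberOfEntries);
  return base + id * kTableEntrySize;
}

// address -> id (mirrors GetDeoptimizationId).
int IdForEntry(Address base, Address addr) {
  assert((addr - base) % kTableEntrySize == 0);
  return static_cast<int>(addr - base) / kTableEntrySize;
}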

18
deps/v8/src/deoptimizer.h

@@ -86,8 +86,8 @@ class DeoptimizerData {
 #endif
 
  private:
-  MemoryChunk* eager_deoptimization_entry_code_;
-  MemoryChunk* lazy_deoptimization_entry_code_;
+  LargeObjectChunk* eager_deoptimization_entry_code_;
+  LargeObjectChunk* lazy_deoptimization_entry_code_;
   Deoptimizer* current_;
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -173,8 +173,7 @@ class Deoptimizer : public Malloced {
   // Patch stack guard check at instruction before pc_after in
   // the unoptimized code to unconditionally call replacement_code.
-  static void PatchStackCheckCodeAt(Code* unoptimized_code,
-                                    Address pc_after,
+  static void PatchStackCheckCodeAt(Address pc_after,
                                     Code* check_code,
                                     Code* replacement_code);
@@ -212,11 +211,6 @@ class Deoptimizer : public Malloced {
     return OFFSET_OF(Deoptimizer, output_count_);
   }
   static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
-  static int frame_alignment_marker_offset() {
-    return OFFSET_OF(Deoptimizer, frame_alignment_marker_); }
-  static int has_alignment_padding_offset() {
-    return OFFSET_OF(Deoptimizer, has_alignment_padding_);
-  }
 
   static int GetDeoptimizedCodeCount(Isolate* isolate);
@@ -291,7 +285,7 @@ class Deoptimizer : public Malloced {
   void AddDoubleValue(intptr_t slot_address, double value);
 
-  static MemoryChunk* CreateCode(BailoutType type);
+  static LargeObjectChunk* CreateCode(BailoutType type);
   static void GenerateDeoptimizationEntries(
       MacroAssembler* masm, int count, BailoutType type);
@@ -321,10 +315,6 @@ class Deoptimizer : public Malloced {
   // Array of output frame descriptions.
   FrameDescription** output_;
 
-  // Frames can be dynamically padded on ia32 to align untagged doubles.
-  Object* frame_alignment_marker_;
-  intptr_t has_alignment_padding_;
-
   List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
 
   static const int table_entry_size_;

2
deps/v8/src/disassembler.cc

@@ -200,7 +200,7 @@ static int DecodeIt(FILE* f,
     // Print all the reloc info for this instruction which are not comments.
     for (int i = 0; i < pcs.length(); i++) {
       // Put together the reloc info
-      RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], NULL);
+      RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
 
       // Indent the printing of the reloc info.
       if (i == 0) {

11
deps/v8/src/elements.cc

@@ -227,9 +227,7 @@ class FastElementsAccessor
  public:
   static MaybeObject* DeleteCommon(JSObject* obj,
                                    uint32_t key) {
-    ASSERT(obj->HasFastElements() ||
-           obj->HasFastSmiOnlyElements() ||
-           obj->HasFastArgumentsElements());
+    ASSERT(obj->HasFastElements() || obj->HasFastArgumentsElements());
     Heap* heap = obj->GetHeap();
     FixedArray* backing_store = FixedArray::cast(obj->elements());
     if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
@@ -598,9 +596,6 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
 
 void ElementsAccessor::InitializeOncePerProcess() {
   static struct ConcreteElementsAccessors {
-    // Use the fast element handler for smi-only arrays. The implementation is
-    // currently identical.
-    FastElementsAccessor fast_smi_elements_handler;
     FastElementsAccessor fast_elements_handler;
     FastDoubleElementsAccessor fast_double_elements_handler;
     DictionaryElementsAccessor dictionary_elements_handler;
@@ -617,7 +612,6 @@ void ElementsAccessor::InitializeOncePerProcess() {
   } element_accessors;
 
   static ElementsAccessor* accessor_array[] = {
-    &element_accessors.fast_smi_elements_handler,
     &element_accessors.fast_elements_handler,
     &element_accessors.fast_double_elements_handler,
     &element_accessors.dictionary_elements_handler,
@@ -633,9 +627,6 @@ void ElementsAccessor::InitializeOncePerProcess() {
     &element_accessors.pixel_elements_handler
   };
 
-  STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
-                kElementsKindCount);
-
   elements_accessors_ = accessor_array;
 }

175
deps/v8/src/execution.cc

@@ -33,7 +33,6 @@
 #include "bootstrapper.h"
 #include "codegen.h"
 #include "debug.h"
-#include "isolate-inl.h"
 #include "runtime-profiler.h"
 #include "simulator.h"
 #include "v8threads.h"
@@ -66,13 +65,13 @@ void StackGuard::reset_limits(const ExecutionAccess& lock) {
 }
 
-static Handle<Object> Invoke(bool is_construct,
-                             Handle<JSFunction> function,
+static Handle<Object> Invoke(bool construct,
+                             Handle<JSFunction> func,
                              Handle<Object> receiver,
                              int argc,
-                             Handle<Object> args[],
+                             Object*** args,
                              bool* has_pending_exception) {
-  Isolate* isolate = function->GetIsolate();
+  Isolate* isolate = func->GetIsolate();
 
   // Entering JavaScript.
   VMState state(isolate, JS);
@@ -80,15 +79,21 @@ static Handle<Object> Invoke(bool is_construct,
   // Placeholder for return value.
   MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
 
-  typedef Object* (*JSEntryFunction)(byte* entry,
-                                     Object* function,
-                                     Object* receiver,
-                                     int argc,
-                                     Object*** args);
-
-  Handle<Code> code = is_construct
-      ? isolate->factory()->js_construct_entry_code()
-      : isolate->factory()->js_entry_code();
+  typedef Object* (*JSEntryFunction)(
+    byte* entry,
+    Object* function,
+    Object* receiver,
+    int argc,
+    Object*** args);
+
+  Handle<Code> code;
+  if (construct) {
+    JSConstructEntryStub stub;
+    code = stub.GetCode();
+  } else {
+    JSEntryStub stub;
+    code = stub.GetCode();
+  }
 
   // Convert calls on global objects to be calls on the global
   // receiver instead to avoid having a 'this' pointer which refers
@@ -100,22 +105,21 @@ static Handle<Object> Invoke(bool is_construct,
   // Make sure that the global object of the context we're about to
   // make the current one is indeed a global object.
-  ASSERT(function->context()->global()->IsGlobalObject());
+  ASSERT(func->context()->global()->IsGlobalObject());
 
   {
     // Save and restore context around invocation and block the
     // allocation of handles without explicit handle scopes.
     SaveContext save(isolate);
     NoHandleAllocation na;
-    JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
+    JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
 
     // Call the function through the right JS entry stub.
-    byte* function_entry = function->code()->entry();
-    JSFunction* func = *function;
-    Object* recv = *receiver;
-    Object*** argv = reinterpret_cast<Object***>(args);
-    value =
-        CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
+    byte* entry_address = func->code()->entry();
+    JSFunction* function = *func;
+    Object* receiver_pointer = *receiver;
+    value = CALL_GENERATED_CODE(entry, entry_address, function,
+                                receiver_pointer, argc, args);
   }
 
 #ifdef DEBUG
@@ -144,11 +148,9 @@ static Handle<Object> Invoke(bool is_construct,
 Handle<Object> Execution::Call(Handle<Object> callable,
                                Handle<Object> receiver,
                                int argc,
-                               Handle<Object> argv[],
+                               Object*** args,
                                bool* pending_exception,
                                bool convert_receiver) {
-  *pending_exception = false;
-
   if (!callable->IsJSFunction()) {
     callable = TryGetFunctionDelegate(callable, pending_exception);
     if (*pending_exception) return callable;
@@ -170,15 +172,13 @@ Handle<Object> Execution::Call(Handle<Object> callable,
     if (*pending_exception) return callable;
   }
 
-  return Invoke(false, func, receiver, argc, argv, pending_exception);
+  return Invoke(false, func, receiver, argc, args, pending_exception);
 }
 
 
-Handle<Object> Execution::New(Handle<JSFunction> func,
-                              int argc,
-                              Handle<Object> argv[],
-                              bool* pending_exception) {
-  return Invoke(true, func, Isolate::Current()->global(), argc, argv,
+Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
+                              Object*** args, bool* pending_exception) {
+  return Invoke(true, func, Isolate::Current()->global(), argc, args,
                 pending_exception);
 }
 
@@ -186,7 +186,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func,
 Handle<Object> Execution::TryCall(Handle<JSFunction> func,
                                   Handle<Object> receiver,
                                   int argc,
-                                  Handle<Object> args[],
+                                  Object*** args,
                                   bool* caught_exception) {
   // Enter a try-block while executing the JavaScript code. To avoid
   // duplicate error printing it must be non-verbose. Also, to avoid
@@ -195,7 +195,6 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
   v8::TryCatch catcher;
   catcher.SetVerbose(false);
   catcher.SetCaptureMessage(false);
-  *caught_exception = false;
 
   Handle<Object> result = Invoke(false, func, receiver, argc, args,
                                  caught_exception);
@@ -378,7 +377,7 @@ void StackGuard::DisableInterrupts() {
 
 bool StackGuard::IsInterrupted() {
   ExecutionAccess access(isolate_);
-  return (thread_local_.interrupt_flags_ & INTERRUPT) != 0;
+  return thread_local_.interrupt_flags_ & INTERRUPT;
 }
@@ -404,7 +403,7 @@ void StackGuard::Preempt() {
 
 bool StackGuard::IsTerminateExecution() {
   ExecutionAccess access(isolate_);
-  return (thread_local_.interrupt_flags_ & TERMINATE) != 0;
+  return thread_local_.interrupt_flags_ & TERMINATE;
 }
@@ -417,7 +416,7 @@ void StackGuard::TerminateExecution() {
 
 bool StackGuard::IsRuntimeProfilerTick() {
   ExecutionAccess access(isolate_);
-  return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
+  return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
 }
@@ -434,22 +433,6 @@ void StackGuard::RequestRuntimeProfilerTick() {
 }
 
-bool StackGuard::IsGCRequest() {
-  ExecutionAccess access(isolate_);
-  return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
-}
-
-
-void StackGuard::RequestGC() {
-  ExecutionAccess access(isolate_);
-  thread_local_.interrupt_flags_ |= GC_REQUEST;
-  if (thread_local_.postpone_interrupts_nesting_ == 0) {
-    thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
-    isolate_->heap()->SetStackLimits();
-  }
-}
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
 bool StackGuard::IsDebugBreak() {
   ExecutionAccess access(isolate_);
@@ -572,15 +555,14 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
 // --- C a l l s   t o   n a t i v e s ---
 
-#define RETURN_NATIVE_CALL(name, args, has_pending_exception)            \
+#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception)      \
   do {                                                                   \
     Isolate* isolate = Isolate::Current();                               \
-    Handle<Object> argv[] = args;                                        \
+    Object** args[argc] = argv;                                          \
     ASSERT(has_pending_exception != NULL);                               \
     return Call(isolate->name##_fun(),                                   \
-                isolate->js_builtins_object(),                           \
-                ARRAY_SIZE(argv), argv,                                  \
-                has_pending_exception);                                  \
+                isolate->js_builtins_object(), argc, args,               \
+                has_pending_exception);                                  \
   } while (false)
@@ -601,44 +583,44 @@ Handle<Object> Execution::ToBoolean(Handle<Object> obj) {
 
 Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_number, { obj }, exc);
+  RETURN_NATIVE_CALL(to_number, 1, { obj.location() }, exc);
 }
 
 
 Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_string, { obj }, exc);
+  RETURN_NATIVE_CALL(to_string, 1, { obj.location() }, exc);
 }
 
 
 Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_detail_string, { obj }, exc);
+  RETURN_NATIVE_CALL(to_detail_string, 1, { obj.location() }, exc);
 }
 
 
 Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
   if (obj->IsSpecObject()) return obj;
-  RETURN_NATIVE_CALL(to_object, { obj }, exc);
+  RETURN_NATIVE_CALL(to_object, 1, { obj.location() }, exc);
 }
 
 
 Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_integer, { obj }, exc);
+  RETURN_NATIVE_CALL(to_integer, 1, { obj.location() }, exc);
 }
 
 
 Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_uint32, { obj }, exc);
+  RETURN_NATIVE_CALL(to_uint32, 1, { obj.location() }, exc);
 }
 
 
 Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_int32, { obj }, exc);
+  RETURN_NATIVE_CALL(to_int32, 1, { obj.location() }, exc);
 }
 
 
 Handle<Object> Execution::NewDate(double time, bool* exc) {
   Handle<Object> time_obj = FACTORY->NewNumber(time);
-  RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
+  RETURN_NATIVE_CALL(create_date, 1, { time_obj.location() }, exc);
 }
@@ -675,7 +657,7 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
 
   bool caught_exception;
   Handle<Object> index_object = factory->NewNumberFromInt(int_index);
-  Handle<Object> index_arg[] = { index_object };
+  Object** index_arg[] = { index_object.location() };
   Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
                                   string,
                                   ARRAY_SIZE(index_arg),
@@ -689,8 +671,7 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
 
 Handle<JSFunction> Execution::InstantiateFunction(
-    Handle<FunctionTemplateInfo> data,
-    bool* exc) {
+    Handle<FunctionTemplateInfo> data, bool* exc) {
   Isolate* isolate = data->GetIsolate();
   // Fast case: see if the function has already been instantiated
   int serial_number = Smi::cast(data->serial_number())->value();
@@ -699,12 +680,10 @@ Handle<JSFunction> Execution::InstantiateFunction(
       GetElementNoExceptionThrown(serial_number);
   if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
   // The function has not yet been instantiated in this context; do it.
-  Handle<Object> args[] = { data };
-  Handle<Object> result = Call(isolate->instantiate_fun(),
-                               isolate->js_builtins_object(),
-                               ARRAY_SIZE(args),
-                               args,
-                               exc);
+  Object** args[1] = { Handle<Object>::cast(data).location() };
+  Handle<Object> result =
+      Call(isolate->instantiate_fun(),
+           isolate->js_builtins_object(), 1, args, exc);
   if (*exc) return Handle<JSFunction>::null();
   return Handle<JSFunction>::cast(result);
 }
@@ -731,12 +710,10 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
     ASSERT(!*exc);
     return Handle<JSObject>(JSObject::cast(result));
   } else {
-    Handle<Object> args[] = { data };
-    Handle<Object> result = Call(isolate->instantiate_fun(),
-                                 isolate->js_builtins_object(),
-                                 ARRAY_SIZE(args),
-                                 args,
-                                 exc);
+    Object** args[1] = { Handle<Object>::cast(data).location() };
+    Handle<Object> result =
+        Call(isolate->instantiate_fun(),
+             isolate->js_builtins_object(), 1, args, exc);
     if (*exc) return Handle<JSObject>::null();
     return Handle<JSObject>::cast(result);
   }
@@ -747,12 +724,9 @@ void Execution::ConfigureInstance(Handle<Object> instance,
                                   Handle<Object> instance_template,
                                   bool* exc) {
   Isolate* isolate = Isolate::Current();
-  Handle<Object> args[] = { instance, instance_template };
+  Object** args[2] = { instance.location(), instance_template.location() };
   Execution::Call(isolate->configure_instance_fun(),
-                  isolate->js_builtins_object(),
-                  ARRAY_SIZE(args),
-                  args,
-                  exc);
+                  isolate->js_builtins_object(), 2, args, exc);
 }
@@ -761,13 +735,16 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
                                             Handle<Object> pos,
                                             Handle<Object> is_global) {
   Isolate* isolate = fun->GetIsolate();
-  Handle<Object> args[] = { recv, fun, pos, is_global };
-  bool caught_exception;
-  Handle<Object> result = TryCall(isolate->get_stack_trace_line_fun(),
-                                  isolate->js_builtins_object(),
-                                  ARRAY_SIZE(args),
-                                  args,
-                                  &caught_exception);
+  const int argc = 4;
+  Object** args[argc] = { recv.location(),
+                          Handle<Object>::cast(fun).location(),
+                          pos.location(),
+                          is_global.location() };
+  bool caught_exception = false;
+  Handle<Object> result =
+      TryCall(isolate->get_stack_trace_line_fun(),
+              isolate->js_builtins_object(), argc, args,
+              &caught_exception);
   if (caught_exception || !result->IsString()) {
     return isolate->factory()->empty_symbol();
   }
@@ -875,12 +852,6 @@ void Execution::ProcessDebugMesssages(bool debug_command_only) {
 MaybeObject* Execution::HandleStackGuardInterrupt() {
   Isolate* isolate = Isolate::Current();
   StackGuard* stack_guard = isolate->stack_guard();
-  if (stack_guard->IsGCRequest()) {
-    isolate->heap()->CollectAllGarbage(false);
-    stack_guard->Continue(GC_REQUEST);
-  }
-
   isolate->counters()->stack_interrupts()->Increment();
   if (stack_guard->IsRuntimeProfilerTick()) {
     isolate->counters()->runtime_profiler_ticks()->Increment();
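The recurring signature change in this file is the argument-passing convention: the revert goes back to handing Execution::Call an Object*** (an array of handle locations) instead of a Handle<Object> array. A hedged sketch of a caller under the restored convention, mirroring the CharAt and InstantiateFunction call sites above (the helper name is made up):

static Handle<Object> CallWithOneArgSketch(Handle<JSFunction> fun,
                                           Handle<Object> receiver,
                                           Handle<Object> arg,
                                           bool* pending_exception) {
  // Each "argument" is the location (an Object**) of a live handle; the
  // array of those locations is what Invoke() forwards to the JS entry stub.
  Object** args[1] = { arg.location() };
  return Execution::Call(fun, receiver, 1, args, pending_exception);
}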

13
deps/v8/src/execution.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,8 +41,7 @@ enum InterruptFlag {
   DEBUGCOMMAND = 1 << 2,
   PREEMPT = 1 << 3,
   TERMINATE = 1 << 4,
-  RUNTIME_PROFILER_TICK = 1 << 5,
-  GC_REQUEST = 1 << 6
+  RUNTIME_PROFILER_TICK = 1 << 5
 };
 
 class Execution : public AllStatic {
@@ -61,7 +60,7 @@ class Execution : public AllStatic {
   static Handle<Object> Call(Handle<Object> callable,
                              Handle<Object> receiver,
                              int argc,
-                             Handle<Object> argv[],
+                             Object*** args,
                              bool* pending_exception,
                              bool convert_receiver = false);
@@ -74,7 +73,7 @@ class Execution : public AllStatic {
   //
   static Handle<Object> New(Handle<JSFunction> func,
                             int argc,
-                            Handle<Object> argv[],
+                            Object*** args,
                             bool* pending_exception);
 
   // Call a function, just like Call(), but make sure to silently catch
@@ -84,7 +83,7 @@ class Execution : public AllStatic {
   static Handle<Object> TryCall(Handle<JSFunction> func,
                                 Handle<Object> receiver,
                                 int argc,
-                                Handle<Object> argv[],
+                                Object*** args,
                                 bool* caught_exception);
 
   // ECMA-262 9.2
@@ -197,8 +196,6 @@ class StackGuard {
   bool IsDebugCommand();
   void DebugCommand();
 #endif
-  bool IsGCRequest();
-  void RequestGC();
   void Continue(InterruptFlag after_what);
 
   // This provides an asynchronous read of the stack limits for the current
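A side note on the StackGuard::Is*() predicates reverted in execution.cc above: dropping the explicit "!= 0" relies on C++'s implicit int-to-bool conversion, so both spellings test the same InterruptFlag bit. A tiny illustrative sketch:

static bool HasPreemptPendingSketch(int interrupt_flags) {
  bool explicit_compare = (interrupt_flags & PREEMPT) != 0;  // pre-revert form
  bool implicit_convert = interrupt_flags & PREEMPT;         // reverted form
  return explicit_compare == implicit_convert;               // always true
}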

7
deps/v8/src/extensions/gc-extension.cc

@@ -40,7 +40,12 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
 
 v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
-  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  bool compact = false;
+  // All allocation spaces other than NEW_SPACE have the same effect.
+  if (args.Length() >= 1 && args[0]->IsBoolean()) {
+    compact = args[0]->BooleanValue();
+  }
+  HEAP->CollectAllGarbage(compact);
   return v8::Undefined();
 }

96
deps/v8/src/factory.cc

@@ -234,7 +234,7 @@ Handle<String> Factory::NewProperSubString(Handle<String> str,
 Handle<String> Factory::NewExternalStringFromAscii(
-    const ExternalAsciiString::Resource* resource) {
+    ExternalAsciiString::Resource* resource) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateExternalStringFromAscii(resource),
@@ -243,7 +243,7 @@ Handle<String> Factory::NewExternalStringFromAscii(
 Handle<String> Factory::NewExternalStringFromTwoByte(
-    const ExternalTwoByteString::Resource* resource) {
+    ExternalTwoByteString::Resource* resource) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateExternalStringFromTwoByte(resource),
@@ -404,12 +404,10 @@ Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
 }
 
-Handle<Map> Factory::NewMap(InstanceType type,
-                            int instance_size,
-                            ElementsKind elements_kind) {
+Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateMap(type, instance_size, elements_kind),
+      isolate()->heap()->AllocateMap(type, instance_size),
       Map);
 }
@@ -457,11 +455,23 @@ Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
 }
 
+Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
+  CALL_HEAP_FUNCTION(isolate(), src->GetFastElementsMap(), Map);
+}
+
+
+Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
+  CALL_HEAP_FUNCTION(isolate(), src->GetSlowElementsMap(), Map);
+}
+
+
 Handle<Map> Factory::GetElementsTransitionMap(
-    Handle<JSObject> src,
-    ElementsKind elements_kind) {
+    Handle<Map> src,
+    ElementsKind elements_kind,
+    bool safe_to_add_transition) {
   CALL_HEAP_FUNCTION(isolate(),
-                     src->GetElementsTransitionMap(elements_kind),
+                     src->GetElementsTransitionMap(elements_kind,
+                                                   safe_to_add_transition),
                      Map);
 }
@@ -631,16 +641,14 @@ Handle<Object> Factory::NewError(const char* maker,
     return undefined_value();
   Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
   Handle<Object> type_obj = LookupAsciiSymbol(type);
-  Handle<Object> argv[] = { type_obj, args };
+  Object** argv[2] = { type_obj.location(),
+                       Handle<Object>::cast(args).location() };
 
   // Invoke the JavaScript factory method. If an exception is thrown while
   // running the factory method, use the exception as the result.
   bool caught_exception;
   Handle<Object> result = Execution::TryCall(fun,
-                                             isolate()->js_builtins_object(),
-                                             ARRAY_SIZE(argv),
-                                             argv,
-                                             &caught_exception);
+      isolate()->js_builtins_object(), 2, argv, &caught_exception);
   return result;
 }
@@ -656,16 +664,13 @@ Handle<Object> Factory::NewError(const char* constructor,
   Handle<JSFunction> fun = Handle<JSFunction>(
       JSFunction::cast(isolate()->js_builtins_object()->
                        GetPropertyNoExceptionThrown(*constr)));
-  Handle<Object> argv[] = { message };
+  Object** argv[1] = { Handle<Object>::cast(message).location() };
 
   // Invoke the JavaScript factory method. If an exception is thrown while
   // running the factory method, use the exception as the result.
   bool caught_exception;
   Handle<Object> result = Execution::TryCall(fun,
-                                             isolate()->js_builtins_object(),
-                                             ARRAY_SIZE(argv),
-                                             argv,
-                                             &caught_exception);
+      isolate()->js_builtins_object(), 1, argv, &caught_exception);
   return result;
 }
@@ -717,12 +722,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
   if (force_initial_map ||
       type != JS_OBJECT_TYPE ||
       instance_size != JSObject::kHeaderSize) {
-    ElementsKind default_elements_kind = FLAG_smi_only_arrays
-        ? FAST_SMI_ONLY_ELEMENTS
-        : FAST_ELEMENTS;
-    Handle<Map> initial_map = NewMap(type,
-                                     instance_size,
-                                     default_elements_kind);
+    Handle<Map> initial_map = NewMap(type, instance_size);
     function->set_initial_map(*initial_map);
     initial_map->set_constructor(*function);
   }
@@ -908,26 +908,11 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
   Handle<JSArray> result =
       Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
                                         pretenure));
-  SetContent(result, elements);
+  result->SetContent(*elements);
   return result;
 }
 
 
-void Factory::SetContent(Handle<JSArray> array,
-                         Handle<FixedArray> elements) {
-  CALL_HEAP_FUNCTION_VOID(
-      isolate(),
-      array->SetContent(*elements));
-}
-
-
-void Factory::EnsureCanContainNonSmiElements(Handle<JSArray> array) {
-  CALL_HEAP_FUNCTION_VOID(
-      isolate(),
-      array->EnsureCanContainNonSmiElements());
-}
-
-
 Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
                                     Handle<Object> prototype) {
   CALL_HEAP_FUNCTION(
@@ -953,13 +938,6 @@ void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
 }
 
-void Factory::SetIdentityHash(Handle<JSObject> object, Object* hash) {
-  CALL_HEAP_FUNCTION_VOID(
-      isolate(),
-      object->SetIdentityHash(hash, ALLOW_CREATION));
-}
-
-
 Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
     Handle<String> name,
     int number_of_literals,
@@ -1012,12 +990,6 @@ Handle<String> Factory::NumberToString(Handle<Object> number) {
 }
 
-Handle<String> Factory::Uint32ToString(uint32_t value) {
-  CALL_HEAP_FUNCTION(isolate(),
-                     isolate()->heap()->Uint32ToString(value), String);
-}
-
-
 Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
     Handle<NumberDictionary> dictionary,
     uint32_t key,
@@ -1327,20 +1299,4 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
 }
 
-Handle<Object> Factory::GlobalConstantFor(Handle<String> name) {
-  Heap* h = isolate()->heap();
-  if (name->Equals(h->undefined_symbol())) return undefined_value();
-  if (name->Equals(h->nan_symbol())) return nan_value();
-  if (name->Equals(h->infinity_symbol())) return infinity_value();
-  return Handle<Object>::null();
-}
-
-
-Handle<Object> Factory::ToBoolean(bool value) {
-  return Handle<Object>(value
-                        ? isolate()->heap()->true_value()
-                        : isolate()->heap()->false_value());
-}
-
 } }  // namespace v8::internal

32
deps/v8/src/factory.h

@@ -145,9 +145,9 @@ class Factory {
   // not make sense to have a UTF-8 factory function for external strings,
   // because we cannot change the underlying buffer.
   Handle<String> NewExternalStringFromAscii(
-      const ExternalAsciiString::Resource* resource);
+      ExternalAsciiString::Resource* resource);
   Handle<String> NewExternalStringFromTwoByte(
-      const ExternalTwoByteString::Resource* resource);
+      ExternalTwoByteString::Resource* resource);
 
   // Create a global (but otherwise uninitialized) context.
   Handle<Context> NewGlobalContext();
@@ -203,9 +203,7 @@ class Factory {
   Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
       Handle<Object> value);
 
-  Handle<Map> NewMap(InstanceType type,
-                     int instance_size,
-                     ElementsKind elements_kind = FAST_ELEMENTS);
+  Handle<Map> NewMap(InstanceType type, int instance_size);
 
   Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@@ -217,8 +215,13 @@ class Factory {
   Handle<Map> CopyMapDropTransitions(Handle<Map> map);
 
-  Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
-                                       ElementsKind elements_kind);
+  Handle<Map> GetFastElementsMap(Handle<Map> map);
+
+  Handle<Map> GetSlowElementsMap(Handle<Map> map);
+
+  Handle<Map> GetElementsTransitionMap(Handle<Map> map,
+                                       ElementsKind elements_kind,
+                                       bool safe_to_add_transition);
 
   Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
@@ -255,18 +258,12 @@
       Handle<FixedArray> elements,
       PretenureFlag pretenure = NOT_TENURED);
 
-  void SetContent(Handle<JSArray> array, Handle<FixedArray> elements);
-
-  void EnsureCanContainNonSmiElements(Handle<JSArray> array);
-
   Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
 
   // Change the type of the argument into a JS object/function and reinitialize.
   void BecomeJSObject(Handle<JSReceiver> object);
   void BecomeJSFunction(Handle<JSReceiver> object);
 
-  void SetIdentityHash(Handle<JSObject> object, Object* hash);
-
   Handle<JSFunction> NewFunction(Handle<String> name,
                                  Handle<Object> prototype);
@@ -359,7 +356,6 @@ class Factory {
                                  PropertyAttributes attributes);
 
   Handle<String> NumberToString(Handle<Object> number);
-  Handle<String> Uint32ToString(uint32_t value);
 
   enum ApiInstanceType {
     JavaScriptObject,
@@ -446,14 +442,6 @@ class Factory {
                                        JSRegExp::Flags flags,
                                        int capture_count);
 
-  // Returns the value for a known global constant (a property of the global
-  // object which is neither configurable nor writable) like 'undefined'.
-  // Returns a null handle when the given name is unknown.
-  Handle<Object> GlobalConstantFor(Handle<String> name);
-
-  // Converts the given boolean condition to JavaScript boolean value.
-  Handle<Object> ToBoolean(bool value);
-
  private:
   Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }

24
deps/v8/src/flag-definitions.h

@@ -104,7 +104,6 @@ DEFINE_bool(harmony_block_scoping, false, "enable harmony block scoping")
 
 // Flags for experimental implementation features.
 DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
-DEFINE_bool(smi_only_arrays, false, "tracks arrays with only smi values")
 DEFINE_bool(string_slices, false, "use string slices")
 
 // Flags for Crankshaft.
@@ -254,16 +253,10 @@ DEFINE_bool(print_cumulative_gc_stat, false,
             "print cumulative GC statistics in name=value format on exit")
 DEFINE_bool(trace_gc_verbose, false,
             "print more details following each garbage collection")
-DEFINE_bool(trace_fragmentation, false,
-            "report fragmentation for old pointer and data pages")
 DEFINE_bool(collect_maps, true,
             "garbage collect maps from which no objects can be reached")
 DEFINE_bool(flush_code, true,
             "flush code that we expect not to use again before full gc")
-DEFINE_bool(incremental_marking, true, "use incremental marking")
-DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
-DEFINE_bool(trace_incremental_marking, false,
-            "trace progress of the incremental marking")
 
 // v8.cc
 DEFINE_bool(use_idle_notification, true,
@@ -283,13 +276,8 @@ DEFINE_bool(native_code_counters, false,
 
 // mark-compact.cc
 DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
-DEFINE_bool(lazy_sweeping, true,
-            "Use lazy sweeping for old pointer and data spaces")
-DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
-            "Flush code caches in maps during mark compact cycle.")
 DEFINE_bool(never_compact, false,
             "Never perform compaction on full GC - testing only")
-DEFINE_bool(compact_code_space, false, "Compact code space")
 DEFINE_bool(cleanup_code_caches_at_gc, true,
             "Flush inline caches prior to mark compact collection and "
             "flush code caches in maps during mark compact cycle.")
@@ -360,15 +348,11 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes",
 DEFINE_bool(help, false, "Print usage message, including flags, on console")
 DEFINE_bool(dump_counters, false, "Dump counters on exit")
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
 DEFINE_bool(debugger, false, "Enable JavaScript debugger")
 DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
                                     "debugger agent in another process")
 DEFINE_bool(debugger_agent, false, "Enable debugger agent")
 DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
-#endif  // ENABLE_DEBUGGER_SUPPORT
 
 DEFINE_string(map_counters, "", "Map counters to a file")
 DEFINE_args(js_arguments, JSArguments(),
             "Pass all remaining arguments to the script. Alias for \"--\".")
@@ -441,11 +425,6 @@ DEFINE_bool(print_global_handles, false, "report global handles after GC")
 
 // ic.cc
 DEFINE_bool(trace_ic, false, "trace inline cache state transitions")
 
-// mark-compact.cc
-DEFINE_bool(force_marking_deque_overflows, false,
-            "force overflows of marking deque by reducing it's size "
-            "to 64 words")
-
 // objects.cc
 DEFINE_bool(trace_normalization,
             false,
@@ -465,9 +444,6 @@ DEFINE_bool(collect_heap_spill_statistics, false,
 
 DEFINE_bool(trace_isolates, false, "trace isolate state changes")
 
-DEFINE_bool(trace_live_byte_count, false,
-            "trace updates to page live byte count")
-
 // VM state
 DEFINE_bool(log_state_changes, false, "Log state changes.")

67
deps/v8/src/frames-inl.h

@@ -77,21 +77,6 @@ inline StackHandler* StackHandler::FromAddress(Address address) {
 }
 
-inline bool StackHandler::is_entry() const {
-  return state() == ENTRY;
-}
-
-
-inline bool StackHandler::is_try_catch() const {
-  return state() == TRY_CATCH;
-}
-
-
-inline bool StackHandler::is_try_finally() const {
-  return state() == TRY_FINALLY;
-}
-
-
 inline StackHandler::State StackHandler::state() const {
   const int offset = StackHandlerConstants::kStateOffset;
   return static_cast<State>(Memory::int_at(address() + offset));
@@ -120,33 +105,8 @@ inline StackHandler* StackFrame::top_handler() const {
 }
 
-inline Code* StackFrame::LookupCode() const {
-  return GetContainingCode(isolate(), pc());
-}
-
-
 inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
-  return isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
+  return isolate->pc_to_code_cache()->GetCacheEntry(pc)->code;
 }
 
-
-inline EntryFrame::EntryFrame(StackFrameIterator* iterator)
-    : StackFrame(iterator) {
-}
-
-
-inline EntryConstructFrame::EntryConstructFrame(StackFrameIterator* iterator)
-    : EntryFrame(iterator) {
-}
-
-
-inline ExitFrame::ExitFrame(StackFrameIterator* iterator)
-    : StackFrame(iterator) {
-}
-
-
-inline StandardFrame::StandardFrame(StackFrameIterator* iterator)
-    : StackFrame(iterator) {
-}
-
@@ -195,11 +155,6 @@ inline bool StandardFrame::IsConstructFrame(Address fp) {
 }
 
-inline JavaScriptFrame::JavaScriptFrame(StackFrameIterator* iterator)
-    : StandardFrame(iterator) {
-}
-
-
 Address JavaScriptFrame::GetParameterSlot(int index) const {
   int param_count = ComputeParametersCount();
   ASSERT(-1 <= index && index < param_count);
@@ -235,26 +190,6 @@ inline Object* JavaScriptFrame::function() const {
 }
 
-inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
-    : JavaScriptFrame(iterator) {
-}
-
-
-inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
-    StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
-}
-
-
-inline InternalFrame::InternalFrame(StackFrameIterator* iterator)
-    : StandardFrame(iterator) {
-}
-
-
-inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator)
-    : InternalFrame(iterator) {
-}
-
-
 template<typename Iterator>
 inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
     Isolate* isolate)

117
deps/v8/src/frames.cc

@ -366,17 +366,16 @@ void SafeStackTraceFrameIterator::Advance() {
Code* StackFrame::GetSafepointData(Isolate* isolate, Code* StackFrame::GetSafepointData(Isolate* isolate,
Address inner_pointer, Address pc,
SafepointEntry* safepoint_entry, SafepointEntry* safepoint_entry,
unsigned* stack_slots) { unsigned* stack_slots) {
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry = PcToCodeCache::PcToCodeCacheEntry* entry =
isolate->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer); isolate->pc_to_code_cache()->GetCacheEntry(pc);
if (!entry->safepoint_entry.is_valid()) { if (!entry->safepoint_entry.is_valid()) {
entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer); entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
ASSERT(entry->safepoint_entry.is_valid()); ASSERT(entry->safepoint_entry.is_valid());
} else { } else {
ASSERT(entry->safepoint_entry.Equals( ASSERT(entry->safepoint_entry.Equals(entry->code->GetSafepointEntry(pc)));
entry->code->GetSafepointEntry(inner_pointer)));
} }
// Fill in the results and return the code. // Fill in the results and return the code.
@ -393,16 +392,11 @@ bool StackFrame::HasHandler() const {
} }
#ifdef DEBUG
static bool GcSafeCodeContains(HeapObject* object, Address addr);
#endif
void StackFrame::IteratePc(ObjectVisitor* v, void StackFrame::IteratePc(ObjectVisitor* v,
Address* pc_address, Address* pc_address,
Code* holder) { Code* holder) {
Address pc = *pc_address; Address pc = *pc_address;
ASSERT(GcSafeCodeContains(holder, pc)); ASSERT(holder->contains(pc));
unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start()); unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
Object* code = holder; Object* code = holder;
v->VisitPointer(&code); v->VisitPointer(&code);
@@ -825,8 +819,7 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
   // back to a slow search in this case to find the original optimized
   // code object.
   if (!code->contains(pc())) {
-    code = isolate()->inner_pointer_to_code_cache()->
-        GcSafeFindCodeForInnerPointer(pc());
+    code = isolate()->pc_to_code_cache()->GcSafeFindCodeForPc(pc());
   }
   ASSERT(code != NULL);
   ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
@@ -888,11 +881,6 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
 }
 
-int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
-  return Smi::cast(GetExpression(0))->value();
-}
-
 Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
   return fp() + StandardFrameConstants::kCallerSPOffset;
 }
@@ -1167,89 +1155,52 @@ JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
 // -------------------------------------------------------------------------
 
-static Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
-  MapWord map_word = object->map_word();
-  return map_word.IsForwardingAddress() ?
-      map_word.ToForwardingAddress()->map() : map_word.ToMap();
-}
-
-static int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
-  return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
-}
-
-#ifdef DEBUG
-static bool GcSafeCodeContains(HeapObject* code, Address addr) {
-  Map* map = GcSafeMapOfCodeSpaceObject(code);
-  ASSERT(map == code->GetHeap()->code_map());
-  Address start = code->address();
-  Address end = code->address() + code->SizeFromMap(map);
-  return start <= addr && addr < end;
-}
-#endif
-
-Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object,
-                                                Address inner_pointer) {
+Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
   Code* code = reinterpret_cast<Code*>(object);
-  ASSERT(code != NULL && GcSafeCodeContains(code, inner_pointer));
+  ASSERT(code != NULL && code->contains(pc));
   return code;
 }
 
-Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
-    Address inner_pointer) {
+Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
   Heap* heap = isolate_->heap();
-  // Check if the inner pointer points into a large object chunk.
-  LargePage* large_page = heap->lo_space()->FindPageContainingPc(inner_pointer);
-  if (large_page != NULL) {
-    return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
-  }
-
-  // Iterate through the page until we reach the end or find an object starting
-  // after the inner pointer.
-  Page* page = Page::FromAddress(inner_pointer);
-
-  Address addr = page->skip_list()->StartFor(inner_pointer);
-
-  Address top = heap->code_space()->top();
-  Address limit = heap->code_space()->limit();
-
+  // Check if the pc points into a large object chunk.
+  LargeObjectChunk* chunk = heap->lo_space()->FindChunkContainingPc(pc);
+  if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
+
+  // Iterate through the 8K page until we reach the end or find an
+  // object starting after the pc.
+  Page* page = Page::FromAddress(pc);
+  HeapObjectIterator iterator(page, heap->GcSafeSizeOfOldObjectFunction());
+  HeapObject* previous = NULL;
   while (true) {
-    if (addr == top && addr != limit) {
-      addr = limit;
-      continue;
-    }
-
-    HeapObject* obj = HeapObject::FromAddress(addr);
-    int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
-    Address next_addr = addr + obj_size;
-    if (next_addr > inner_pointer) return GcSafeCastToCode(obj, inner_pointer);
-    addr = next_addr;
+    HeapObject* next = iterator.next();
+    if (next == NULL || next->address() >= pc) {
+      return GcSafeCastToCode(previous, pc);
+    }
+    previous = next;
   }
 }
 
-InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
-    InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
+PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
   isolate_->counters()->pc_to_code()->Increment();
-  ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize));
+  ASSERT(IsPowerOf2(kPcToCodeCacheSize));
   uint32_t hash = ComputeIntegerHash(
-      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)));
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
-  uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
+  uint32_t index = hash & (kPcToCodeCacheSize - 1);
-  InnerPointerToCodeCacheEntry* entry = cache(index);
+  PcToCodeCacheEntry* entry = cache(index);
-  if (entry->inner_pointer == inner_pointer) {
+  if (entry->pc == pc) {
     isolate_->counters()->pc_to_code_cached()->Increment();
-    ASSERT(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer));
+    ASSERT(entry->code == GcSafeFindCodeForPc(pc));
   } else {
     // Because this code may be interrupted by a profiling signal that
-    // also queries the cache, we cannot update inner_pointer before the code
-    // has been set. Otherwise, we risk trying to use a cache entry before
+    // also queries the cache, we cannot update pc before the code has
+    // been set. Otherwise, we risk trying to use a cache entry before
     // the code has been computed.
-    entry->code = GcSafeFindCodeForInnerPointer(inner_pointer);
+    entry->code = GcSafeFindCodeForPc(pc);
     entry->safepoint_entry.Reset();
-    entry->inner_pointer = inner_pointer;
+    entry->pc = pc;
   }
   return entry;
 }
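Editor's note: for readers skimming the revert, the PcToCodeCache that the hunk above restores is a direct-mapped, power-of-two-sized table keyed by a hashed return address, with a slow heap scan on a miss. The following standalone sketch (simplified types, hypothetical names such as FindCodeSlow, and a generic multiplicative hash; this is not the V8 source itself) illustrates the same lookup pattern:

    // Minimal sketch of a direct-mapped pc -> code cache, assuming a
    // power-of-two table size and an externally supplied slow path.
    #include <cstdint>
    #include <cstring>

    struct CodeStub;                                     // stand-in for a code object
    CodeStub* FindCodeSlow(uintptr_t) { return nullptr; }  // placeholder slow path

    class PcToCodeCacheSketch {
     public:
      PcToCodeCacheSketch() { std::memset(cache_, 0, sizeof(cache_)); }

      CodeStub* Lookup(uintptr_t pc) {
        // Hash the pc and mask it down to an index; kSize must be a power of two.
        uint32_t index = Hash(pc) & (kSize - 1);
        Entry* entry = &cache_[index];
        if (entry->pc != pc) {
          // Miss: fill code before pc, mirroring the ordering comment in the
          // patch (a concurrent reader should never see a matching pc with a
          // stale code pointer).
          entry->code = FindCodeSlow(pc);
          entry->pc = pc;
        }
        return entry->code;
      }

     private:
      struct Entry {
        uintptr_t pc;
        CodeStub* code;
      };
      static uint32_t Hash(uintptr_t pc) {
        return static_cast<uint32_t>(pc * 2654435761u);  // Knuth-style multiplicative hash
      }
      static const int kSize = 1024;
      Entry cache_[kSize];
    };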

78
deps/v8/src/frames.h

@@ -49,36 +49,36 @@ class StackFrameIterator;
 class ThreadLocalTop;
 class Isolate;
 
-class InnerPointerToCodeCache {
+class PcToCodeCache {
  public:
-  struct InnerPointerToCodeCacheEntry {
-    Address inner_pointer;
+  struct PcToCodeCacheEntry {
+    Address pc;
     Code* code;
     SafepointEntry safepoint_entry;
   };
 
-  explicit InnerPointerToCodeCache(Isolate* isolate) : isolate_(isolate) {
+  explicit PcToCodeCache(Isolate* isolate) : isolate_(isolate) {
     Flush();
   }
 
-  Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
-  Code* GcSafeCastToCode(HeapObject* object, Address inner_pointer);
+  Code* GcSafeFindCodeForPc(Address pc);
+  Code* GcSafeCastToCode(HeapObject* object, Address pc);
 
   void Flush() {
     memset(&cache_[0], 0, sizeof(cache_));
   }
 
-  InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer);
+  PcToCodeCacheEntry* GetCacheEntry(Address pc);
 
  private:
-  InnerPointerToCodeCacheEntry* cache(int index) { return &cache_[index]; }
+  PcToCodeCacheEntry* cache(int index) { return &cache_[index]; }
 
   Isolate* isolate_;
 
-  static const int kInnerPointerToCodeCacheSize = 1024;
+  static const int kPcToCodeCacheSize = 1024;
 
-  InnerPointerToCodeCacheEntry cache_[kInnerPointerToCodeCacheSize];
+  PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
 
-  DISALLOW_COPY_AND_ASSIGN(InnerPointerToCodeCache);
+  DISALLOW_COPY_AND_ASSIGN(PcToCodeCache);
 };
@ -106,9 +106,9 @@ class StackHandler BASE_EMBEDDED {
static inline StackHandler* FromAddress(Address address); static inline StackHandler* FromAddress(Address address);
// Testers // Testers
inline bool is_entry() const; bool is_entry() { return state() == ENTRY; }
inline bool is_try_catch() const; bool is_try_catch() { return state() == TRY_CATCH; }
inline bool is_try_finally() const; bool is_try_finally() { return state() == TRY_FINALLY; }
private: private:
// Accessors. // Accessors.
@ -139,10 +139,7 @@ class StackFrame BASE_EMBEDDED {
enum Type { enum Type {
NONE = 0, NONE = 0,
STACK_FRAME_TYPE_LIST(DECLARE_TYPE) STACK_FRAME_TYPE_LIST(DECLARE_TYPE)
NUMBER_OF_TYPES, NUMBER_OF_TYPES
// Used by FrameScope to indicate that the stack frame is constructed
// manually and the FrameScope does not need to emit code.
MANUAL
}; };
#undef DECLARE_TYPE #undef DECLARE_TYPE
@ -218,7 +215,9 @@ class StackFrame BASE_EMBEDDED {
virtual Code* unchecked_code() const = 0; virtual Code* unchecked_code() const = 0;
// Get the code associated with this frame. // Get the code associated with this frame.
inline Code* LookupCode() const; Code* LookupCode() const {
return GetContainingCode(isolate(), pc());
}
// Get the code object that contains the given pc. // Get the code object that contains the given pc.
static inline Code* GetContainingCode(Isolate* isolate, Address pc); static inline Code* GetContainingCode(Isolate* isolate, Address pc);
@ -300,7 +299,7 @@ class EntryFrame: public StackFrame {
virtual void SetCallerFp(Address caller_fp); virtual void SetCallerFp(Address caller_fp);
protected: protected:
inline explicit EntryFrame(StackFrameIterator* iterator); explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
// The caller stack pointer for entry frames is always zero. The // The caller stack pointer for entry frames is always zero. The
// real information about the caller frame is available through the // real information about the caller frame is available through the
@ -327,7 +326,8 @@ class EntryConstructFrame: public EntryFrame {
} }
protected: protected:
inline explicit EntryConstructFrame(StackFrameIterator* iterator); explicit EntryConstructFrame(StackFrameIterator* iterator)
: EntryFrame(iterator) { }
private: private:
friend class StackFrameIterator; friend class StackFrameIterator;
@ -361,7 +361,7 @@ class ExitFrame: public StackFrame {
static void FillState(Address fp, Address sp, State* state); static void FillState(Address fp, Address sp, State* state);
protected: protected:
inline explicit ExitFrame(StackFrameIterator* iterator); explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
virtual Address GetCallerStackPointer() const; virtual Address GetCallerStackPointer() const;
@ -394,7 +394,8 @@ class StandardFrame: public StackFrame {
} }
protected: protected:
inline explicit StandardFrame(StackFrameIterator* iterator); explicit StandardFrame(StackFrameIterator* iterator)
: StackFrame(iterator) { }
virtual void ComputeCallerState(State* state) const; virtual void ComputeCallerState(State* state) const;
@ -513,7 +514,8 @@ class JavaScriptFrame: public StandardFrame {
} }
protected: protected:
inline explicit JavaScriptFrame(StackFrameIterator* iterator); explicit JavaScriptFrame(StackFrameIterator* iterator)
: StandardFrame(iterator) { }
virtual Address GetCallerStackPointer() const; virtual Address GetCallerStackPointer() const;
@ -550,7 +552,8 @@ class OptimizedFrame : public JavaScriptFrame {
DeoptimizationInputData* GetDeoptimizationData(int* deopt_index); DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
protected: protected:
inline explicit OptimizedFrame(StackFrameIterator* iterator); explicit OptimizedFrame(StackFrameIterator* iterator)
: JavaScriptFrame(iterator) { }
private: private:
friend class StackFrameIterator; friend class StackFrameIterator;
@ -578,9 +581,12 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
int index) const; int index) const;
protected: protected:
inline explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator); explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
: JavaScriptFrame(iterator) { }
virtual int GetNumberOfIncomingArguments() const; virtual int GetNumberOfIncomingArguments() const {
return Smi::cast(GetExpression(0))->value();
}
virtual Address GetCallerStackPointer() const; virtual Address GetCallerStackPointer() const;
@ -605,7 +611,8 @@ class InternalFrame: public StandardFrame {
} }
protected: protected:
inline explicit InternalFrame(StackFrameIterator* iterator); explicit InternalFrame(StackFrameIterator* iterator)
: StandardFrame(iterator) { }
virtual Address GetCallerStackPointer() const; virtual Address GetCallerStackPointer() const;
@ -626,7 +633,8 @@ class ConstructFrame: public InternalFrame {
} }
protected: protected:
inline explicit ConstructFrame(StackFrameIterator* iterator); explicit ConstructFrame(StackFrameIterator* iterator)
: InternalFrame(iterator) { }
private: private:
friend class StackFrameIterator; friend class StackFrameIterator;
@ -707,19 +715,15 @@ class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id); inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
JavaScriptFrameIteratorTemp(Address fp, JavaScriptFrameIteratorTemp(Address fp, Address sp,
Address sp, Address low_bound, Address high_bound) :
Address low_bound,
Address high_bound) :
iterator_(fp, sp, low_bound, high_bound) { iterator_(fp, sp, low_bound, high_bound) {
if (!done()) Advance(); if (!done()) Advance();
} }
JavaScriptFrameIteratorTemp(Isolate* isolate, JavaScriptFrameIteratorTemp(Isolate* isolate,
Address fp, Address fp, Address sp,
Address sp, Address low_bound, Address high_bound) :
Address low_bound,
Address high_bound) :
iterator_(isolate, fp, sp, low_bound, high_bound) { iterator_(isolate, fp, sp, low_bound, high_bound) {
if (!done()) Advance(); if (!done()) Advance();
} }

41
deps/v8/src/full-codegen.cc

@ -244,6 +244,11 @@ void BreakableStatementChecker::VisitBinaryOperation(BinaryOperation* expr) {
} }
void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) {
Visit(expr->expression());
}
void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) { void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
Visit(expr->left()); Visit(expr->left());
Visit(expr->right()); Visit(expr->right());
@ -286,10 +291,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_optimizable(info->IsOptimizable()); code->set_optimizable(info->IsOptimizable());
cgen.PopulateDeoptimizationData(code); cgen.PopulateDeoptimizationData(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport()); code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
#ifdef ENABLE_DEBUGGER_SUPPORT
code->set_has_debug_break_slots( code->set_has_debug_break_slots(
info->isolate()->debugger()->IsDebuggerActive()); info->isolate()->debugger()->IsDebuggerActive());
#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0); code->set_allow_osr_at_loop_nesting_level(0);
code->set_stack_check_table_offset(table_offset); code->set_stack_check_table_offset(table_offset);
CodeGenerator::PrintCode(code, info); CodeGenerator::PrintCode(code, info);
@ -820,19 +823,9 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
if (stmt->block_scope() != NULL) { if (stmt->block_scope() != NULL) {
{ Comment cmnt(masm_, "[ Extend block context"); { Comment cmnt(masm_, "[ Extend block context");
scope_ = stmt->block_scope(); scope_ = stmt->block_scope();
Handle<SerializedScopeInfo> scope_info = scope_->GetSerializedScopeInfo(); __ Push(scope_->GetSerializedScopeInfo());
int heap_slots =
scope_info->NumberOfContextSlots() - Context::MIN_CONTEXT_SLOTS;
__ Push(scope_info);
PushFunctionArgumentForContextAllocation(); PushFunctionArgumentForContextAllocation();
if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) { __ CallRuntime(Runtime::kPushBlockContext, 2);
FastNewBlockContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kPushBlockContext, 2);
}
// Replace the context stored in the frame.
StoreToFrameField(StandardFrameConstants::kContextOffset, StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register()); context_register());
} }
@@ -1328,21 +1321,19 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryCatch::Exit(
 }
 
-bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
-  Expression *sub_expr;
+bool FullCodeGenerator::TryLiteralCompare(CompareOperation* compare,
+                                          Label* if_true,
+                                          Label* if_false,
+                                          Label* fall_through) {
+  Expression *expr;
   Handle<String> check;
-  if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
-    EmitLiteralCompareTypeof(sub_expr, check);
-    return true;
-  }
-
-  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
-    EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
+  if (compare->IsLiteralCompareTypeof(&expr, &check)) {
+    EmitLiteralCompareTypeof(expr, check, if_true, if_false, fall_through);
     return true;
   }
 
-  if (expr->IsLiteralCompareNull(&sub_expr)) {
-    EmitLiteralCompareNil(expr, sub_expr, kNullValue);
+  if (compare->IsLiteralCompareUndefined(&expr)) {
+    EmitLiteralCompareUndefined(expr, if_true, if_false, fall_through);
     return true;
   }
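Editor's note: both versions of TryLiteralCompare above implement the same idea, pattern-matching comparisons whose right-hand side is a literal (typeof x == "string", x === undefined, x === null) so the full code generator can emit a specialized check instead of a generic compare. A rough, self-contained illustration of that dispatch, using stand-in types and trivial Emit* stubs rather than V8's AST and codegen classes:

    // Sketch of the "literal compare" fast path: if a comparison matches a
    // known literal pattern, emit a specialized sequence and report success;
    // otherwise the caller falls back to the generic compare.
    #include <cstdio>
    #include <string>

    struct Expr {};  // stand-in for an AST expression node

    struct CompareOp {
      // These stand in for the IsLiteralCompare* predicates in the diff.
      bool IsLiteralCompareTypeof(Expr** sub, std::string* check) const {
        *sub = nullptr; *check = "string"; return match_typeof; }
      bool IsLiteralCompareUndefined(Expr** sub) const {
        *sub = nullptr; return match_undefined; }
      bool IsLiteralCompareNull(Expr** sub) const {
        *sub = nullptr; return match_null; }
      bool match_typeof = false, match_undefined = false, match_null = false;
    };

    void EmitLiteralCompareTypeof(Expr*, const std::string& c) { std::printf("typeof == \"%s\"\n", c.c_str()); }
    void EmitLiteralCompareUndefined(Expr*) { std::printf("=== undefined\n"); }
    void EmitLiteralCompareNull(Expr*) { std::printf("=== null\n"); }

    bool TryLiteralCompare(const CompareOp& op) {
      Expr* sub = nullptr;
      std::string check;
      if (op.IsLiteralCompareTypeof(&sub, &check)) { EmitLiteralCompareTypeof(sub, check); return true; }
      if (op.IsLiteralCompareUndefined(&sub))      { EmitLiteralCompareUndefined(sub);     return true; }
      if (op.IsLiteralCompareNull(&sub))           { EmitLiteralCompareNull(sub);          return true; }
      return false;  // generic compare path
    }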

23
deps/v8/src/full-codegen.h

@ -391,16 +391,25 @@ class FullCodeGenerator: public AstVisitor {
// Try to perform a comparison as a fast inlined literal compare if // Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operations // the operands allow it. Returns true if the compare operations
// has been matched and all code generated; false otherwise. // has been matched and all code generated; false otherwise.
bool TryLiteralCompare(CompareOperation* compare); bool TryLiteralCompare(CompareOperation* compare,
Label* if_true,
Label* if_false,
Label* fall_through);
// Platform-specific code for comparing the type of a value with // Platform-specific code for comparing the type of a value with
// a given literal string. // a given literal string.
void EmitLiteralCompareTypeof(Expression* expr, Handle<String> check); void EmitLiteralCompareTypeof(Expression* expr,
Handle<String> check,
// Platform-specific code for equality comparison with a nil-like value. Label* if_true,
void EmitLiteralCompareNil(CompareOperation* expr, Label* if_false,
Expression* sub_expr, Label* fall_through);
NilValue nil);
// Platform-specific code for strict equality comparison with
// the undefined value.
void EmitLiteralCompareUndefined(Expression* expr,
Label* if_true,
Label* if_false,
Label* fall_through);
// Bailout support. // Bailout support.
void PrepareForBailout(Expression* node, State state); void PrepareForBailout(Expression* node, State state);

6
deps/v8/src/func-name-inferrer.h

@ -70,12 +70,6 @@ class FuncNameInferrer : public ZoneObject {
} }
} }
void RemoveLastFunction() {
if (IsOpen() && !funcs_to_infer_.is_empty()) {
funcs_to_infer_.RemoveLast();
}
}
// Infers a function name and leaves names collection state. // Infers a function name and leaves names collection state.
void Infer() { void Infer() {
ASSERT(IsOpen()); ASSERT(IsOpen());

4
deps/v8/src/globals.h

@ -255,10 +255,6 @@ const int kBinary32MinExponent = 0x01;
const int kBinary32MantissaBits = 23; const int kBinary32MantissaBits = 23;
const int kBinary32ExponentShift = 23; const int kBinary32ExponentShift = 23;
// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
// other bits set.
const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
// ASCII/UC16 constants // ASCII/UC16 constants
// Code-point values in Unicode 4.0 are 21 bits wide. // Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16; typedef uint16_t uc16;

13
deps/v8/src/handles.cc

@ -421,18 +421,17 @@ Handle<Object> PreventExtensions(Handle<JSObject> object) {
} }
Handle<Object> SetHiddenProperty(Handle<JSObject> obj, Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
Handle<String> key, JSObject::HiddenPropertiesFlag flag) {
Handle<Object> value) {
CALL_HEAP_FUNCTION(obj->GetIsolate(), CALL_HEAP_FUNCTION(obj->GetIsolate(),
obj->SetHiddenProperty(*key, *value), obj->GetHiddenProperties(flag),
Object); Object);
} }
int GetIdentityHash(Handle<JSReceiver> obj) { int GetIdentityHash(Handle<JSObject> obj) {
CALL_AND_RETRY(obj->GetIsolate(), CALL_AND_RETRY(obj->GetIsolate(),
obj->GetIdentityHash(ALLOW_CREATION), obj->GetIdentityHash(JSObject::ALLOW_CREATION),
return Smi::cast(__object__)->value(), return Smi::cast(__object__)->value(),
return 0); return 0);
} }
@ -887,7 +886,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table, Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
Handle<JSReceiver> key, Handle<JSObject> key,
Handle<Object> value) { Handle<Object> value) {
CALL_HEAP_FUNCTION(table->GetIsolate(), CALL_HEAP_FUNCTION(table->GetIsolate(),
table->Put(*key, *value), table->Put(*key, *value),

15
deps/v8/src/handles.h

@ -263,13 +263,14 @@ Handle<Object> GetPrototype(Handle<Object> obj);
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value); Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
// Sets a hidden property on an object. Returns obj on success, undefined // Return the object's hidden properties object. If the object has no hidden
// if trying to set the property on a detached proxy. // properties and HiddenPropertiesFlag::ALLOW_CREATION is passed, then a new
Handle<Object> SetHiddenProperty(Handle<JSObject> obj, // hidden property object will be allocated. Otherwise Heap::undefined_value
Handle<String> key, // is returned.
Handle<Object> value); Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
JSObject::HiddenPropertiesFlag flag);
int GetIdentityHash(Handle<JSReceiver> obj); int GetIdentityHash(Handle<JSObject> obj);
Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index); Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop); Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
@ -347,7 +348,7 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> PreventExtensions(Handle<JSObject> object); Handle<Object> PreventExtensions(Handle<JSObject> object);
Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table, Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
Handle<JSReceiver> key, Handle<JSObject> key,
Handle<Object> value); Handle<Object> value);
// Does lazy compilation of the given function. Returns true on success and // Does lazy compilation of the given function. Returns true on success and

104
deps/v8/src/heap-inl.h

@ -33,26 +33,15 @@
#include "list-inl.h" #include "list-inl.h"
#include "objects.h" #include "objects.h"
#include "v8-counters.h" #include "v8-counters.h"
#include "store-buffer.h"
#include "store-buffer-inl.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
void PromotionQueue::insert(HeapObject* target, int size) { void PromotionQueue::insert(HeapObject* target, int size) {
if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
NewSpacePage* rear_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
ASSERT(!rear_page->prev_page()->is_anchor());
rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit());
}
*(--rear_) = reinterpret_cast<intptr_t>(target); *(--rear_) = reinterpret_cast<intptr_t>(target);
*(--rear_) = size; *(--rear_) = size;
// Assert no overflow into live objects. // Assert no overflow into live objects.
#ifdef DEBUG ASSERT(reinterpret_cast<Address>(rear_) >= HEAP->new_space()->top());
SemiSpace::AssertValidRange(HEAP->new_space()->top(),
reinterpret_cast<Address>(rear_));
#endif
} }
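Editor's note: the PromotionQueue::insert touched above stores (object, size) pairs by writing them downward from the top of to-space; remove() walks front_ down in the same way, and the queue is empty when the two pointers meet. A toy model of that layout, using a plain array instead of the semispace and hypothetical names, may make the pointer arithmetic easier to follow:

    // Toy model of the downward-growing promotion queue: entries are
    // (target, size) pairs pushed from the high end of a buffer; front_ and
    // rear_ both move toward lower addresses.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    class PromotionQueueSketch {
     public:
      explicit PromotionQueueSketch(intptr_t* buffer_end)
          : front_(buffer_end), rear_(buffer_end) {}

      bool is_empty() const { return front_ == rear_; }

      void insert(void* target, int size) {
        *(--rear_) = reinterpret_cast<intptr_t>(target);
        *(--rear_) = size;
      }

      void remove(void** target, int* size) {
        assert(!is_empty());
        *target = reinterpret_cast<void*>(*(--front_));
        *size = static_cast<int>(*(--front_));
      }

     private:
      intptr_t* front_;  // oldest entry; higher in memory than rear_
      intptr_t* rear_;   // newest entry
    };

    int main() {
      intptr_t storage[16];
      PromotionQueueSketch queue(storage + 16);  // start at the high end
      int value = 42;
      queue.insert(&value, sizeof(value));
      void* target = nullptr;
      int size = 0;
      queue.remove(&target, &size);
      std::printf("size=%d same=%d empty=%d\n", size, target == &value, queue.is_empty());
      return 0;
    }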
@ -95,7 +84,7 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
// Allocate string. // Allocate string.
Object* result; Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace()) { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE) ? lo_space_->AllocateRaw(size)
: old_data_space_->AllocateRaw(size); : old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result; if (!maybe_result->ToObject(&result)) return maybe_result;
} }
@ -128,7 +117,7 @@ MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
// Allocate string. // Allocate string.
Object* result; Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace()) { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
? lo_space_->AllocateRaw(size, NOT_EXECUTABLE) ? lo_space_->AllocateRaw(size)
: old_data_space_->AllocateRaw(size); : old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result; if (!maybe_result->ToObject(&result)) return maybe_result;
} }
@ -192,7 +181,7 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes,
} else if (CODE_SPACE == space) { } else if (CODE_SPACE == space) {
result = code_space_->AllocateRaw(size_in_bytes); result = code_space_->AllocateRaw(size_in_bytes);
} else if (LO_SPACE == space) { } else if (LO_SPACE == space) {
result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE); result = lo_space_->AllocateRaw(size_in_bytes);
} else if (CELL_SPACE == space) { } else if (CELL_SPACE == space) {
result = cell_space_->AllocateRaw(size_in_bytes); result = cell_space_->AllocateRaw(size_in_bytes);
} else { } else {
@ -276,11 +265,6 @@ bool Heap::InNewSpace(Object* object) {
} }
bool Heap::InNewSpace(Address addr) {
return new_space_.Contains(addr);
}
bool Heap::InFromSpace(Object* object) { bool Heap::InFromSpace(Object* object) {
return new_space_.FromSpaceContains(object); return new_space_.FromSpaceContains(object);
} }
@ -291,36 +275,29 @@ bool Heap::InToSpace(Object* object) {
} }
bool Heap::OldGenerationAllocationLimitReached() {
if (!incremental_marking()->IsStopped()) return false;
return OldGenerationSpaceAvailable() < 0;
}
bool Heap::ShouldBePromoted(Address old_address, int object_size) { bool Heap::ShouldBePromoted(Address old_address, int object_size) {
// An object should be promoted if: // An object should be promoted if:
// - the object has survived a scavenge operation or // - the object has survived a scavenge operation or
// - to space is already 25% full. // - to space is already 25% full.
NewSpacePage* page = NewSpacePage::FromAddress(old_address); return old_address < new_space_.age_mark()
Address age_mark = new_space_.age_mark(); || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
(!page->ContainsLimit(age_mark) || old_address < age_mark);
return below_mark || (new_space_.Size() + object_size) >=
(new_space_.EffectiveCapacity() >> 2);
} }
void Heap::RecordWrite(Address address, int offset) { void Heap::RecordWrite(Address address, int offset) {
if (!InNewSpace(address)) store_buffer_.Mark(address + offset); if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
SLOW_ASSERT(Contains(address + offset));
Page::FromAddress(address)->MarkRegionDirty(address + offset);
} }
void Heap::RecordWrites(Address address, int start, int len) { void Heap::RecordWrites(Address address, int start, int len) {
if (!InNewSpace(address)) { if (new_space_.Contains(address)) return;
for (int i = 0; i < len; i++) { ASSERT(!new_space_.FromSpaceContains(address));
store_buffer_.Mark(address + start + i * kPointerSize); Page* page = Page::FromAddress(address);
} page->SetRegionMarks(page->GetRegionMarks() |
} page->GetRegionMaskForSpan(address + start, len * kPointerSize));
} }
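Editor's note: the restored Heap::ShouldBePromoted above boils down to a simple policy: promote an object out of new space if it already survived one scavenge (it lies below the age mark) or if to-space is at least a quarter full. A self-contained numeric sketch of that predicate, using plain integers instead of V8 addresses (the 25% threshold is the ">> 2" in the patch):

    // Minimal model of the scavenge promotion policy: promote if the object
    // is below the age mark, or if to-space usage including this object
    // reaches 25% of capacity.
    #include <cstdio>

    bool ShouldBePromoted(long object_address,
                          long age_mark,
                          long to_space_size,
                          long to_space_capacity,
                          int object_size) {
      bool survived_scavenge = object_address < age_mark;
      bool to_space_quarter_full =
          (to_space_size + object_size) >= (to_space_capacity >> 2);  // 25%
      return survived_scavenge || to_space_quarter_full;
    }

    int main() {
      // 512 KB to-space, age mark at 0x1000: an object above the mark is only
      // promoted once usage reaches 128 KB (a quarter of capacity).
      std::printf("%d\n", ShouldBePromoted(0x2000, 0x1000, 100 * 1024, 512 * 1024, 64));  // 0
      std::printf("%d\n", ShouldBePromoted(0x2000, 0x1000, 130 * 1024, 512 * 1024, 64));  // 1
      std::printf("%d\n", ShouldBePromoted(0x0800, 0x1000, 100 * 1024, 512 * 1024, 64));  // 1
      return 0;
    }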
@ -366,6 +343,31 @@ void Heap::CopyBlock(Address dst, Address src, int byte_size) {
} }
void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
Page* page = Page::FromAddress(dst);
uint32_t marks = page->GetRegionMarks();
for (int remaining = byte_size / kPointerSize;
remaining > 0;
remaining--) {
Memory::Object_at(dst) = Memory::Object_at(src);
if (InNewSpace(Memory::Object_at(dst))) {
marks |= page->GetRegionMaskForAddress(dst);
}
dst += kPointerSize;
src += kPointerSize;
}
page->SetRegionMarks(marks);
}
void Heap::MoveBlock(Address dst, Address src, int byte_size) { void Heap::MoveBlock(Address dst, Address src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize)); ASSERT(IsAligned(byte_size, kPointerSize));
@ -385,6 +387,16 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
} }
void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
ASSERT((dst < src) || (dst >= (src + byte_size)));
CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
}
void Heap::ScavengePointer(HeapObject** p) { void Heap::ScavengePointer(HeapObject** p) {
ScavengeObject(p, *p); ScavengeObject(p, *p);
} }
@ -402,9 +414,7 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
// If the first word is a forwarding address, the object has already been // If the first word is a forwarding address, the object has already been
// copied. // copied.
if (first_word.IsForwardingAddress()) { if (first_word.IsForwardingAddress()) {
HeapObject* dest = first_word.ToForwardingAddress(); *p = first_word.ToForwardingAddress();
ASSERT(HEAP->InFromSpace(*p));
*p = dest;
return; return;
} }
@ -449,7 +459,7 @@ int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
amount_of_external_allocated_memory_ - amount_of_external_allocated_memory_ -
amount_of_external_allocated_memory_at_last_global_gc_; amount_of_external_allocated_memory_at_last_global_gc_;
if (amount_since_last_global_gc > external_allocation_limit_) { if (amount_since_last_global_gc > external_allocation_limit_) {
CollectAllGarbage(kNoGCFlags); CollectAllGarbage(false);
} }
} else { } else {
// Avoid underflow. // Avoid underflow.
@ -466,7 +476,6 @@ void Heap::SetLastScriptId(Object* last_script_id) {
roots_[kLastScriptIdRootIndex] = last_script_id; roots_[kLastScriptIdRootIndex] = last_script_id;
} }
Isolate* Heap::isolate() { Isolate* Heap::isolate() {
return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) - return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4); reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
@ -679,6 +688,15 @@ Heap* _inline_get_heap_() {
} }
void MarkCompactCollector::SetMark(HeapObject* obj) {
tracer_->increment_marked_count();
#ifdef DEBUG
UpdateLiveObjectCount(obj);
#endif
obj->SetMark();
}
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_HEAP_INL_H_ #endif // V8_HEAP_INL_H_

1
deps/v8/src/heap-profiler.cc

@ -114,6 +114,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
bool generation_completed = true; bool generation_completed = true;
switch (s_type) { switch (s_type) {
case HeapSnapshot::kFull: { case HeapSnapshot::kFull: {
HEAP->CollectAllGarbage(true);
HeapSnapshotGenerator generator(result, control); HeapSnapshotGenerator generator(result, control);
generation_completed = generator.GenerateSnapshot(); generation_completed = generator.GenerateSnapshot();
break; break;

1531
deps/v8/src/heap.cc

File diff suppressed because it is too large

458
deps/v8/src/heap.h

@ -32,15 +32,11 @@
#include "allocation.h" #include "allocation.h"
#include "globals.h" #include "globals.h"
#include "incremental-marking.h"
#include "list.h" #include "list.h"
#include "mark-compact.h" #include "mark-compact.h"
#include "objects-visiting.h"
#include "spaces.h" #include "spaces.h"
#include "splay-tree-inl.h" #include "splay-tree-inl.h"
#include "store-buffer.h"
#include "v8-counters.h" #include "v8-counters.h"
#include "v8globals.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -52,20 +48,20 @@ inline Heap* _inline_get_heap_();
// Defines all the roots in Heap. // Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V) \ #define STRONG_ROOT_LIST(V) \
/* Put the byte array map early. We need it to be in place by the time */ \
/* the deserializer hits the next page, since it wants to put a byte */ \
/* array in the unused space at the end of the page. */ \
V(Map, byte_array_map, ByteArrayMap) \ V(Map, byte_array_map, ByteArrayMap) \
V(Map, free_space_map, FreeSpaceMap) \
V(Map, one_pointer_filler_map, OnePointerFillerMap) \ V(Map, one_pointer_filler_map, OnePointerFillerMap) \
V(Map, two_pointer_filler_map, TwoPointerFillerMap) \ V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
/* Cluster the most popular ones in a few cache lines here at the top. */ \ /* Cluster the most popular ones in a few cache lines here at the top. */ \
V(Smi, store_buffer_top, StoreBufferTop) \ V(Object, undefined_value, UndefinedValue) \
V(Oddball, undefined_value, UndefinedValue) \ V(Object, the_hole_value, TheHoleValue) \
V(Oddball, the_hole_value, TheHoleValue) \ V(Object, null_value, NullValue) \
V(Oddball, null_value, NullValue) \ V(Object, true_value, TrueValue) \
V(Oddball, true_value, TrueValue) \ V(Object, false_value, FalseValue) \
V(Oddball, false_value, FalseValue) \ V(Object, arguments_marker, ArgumentsMarker) \
V(Oddball, arguments_marker, ArgumentsMarker) \
V(Oddball, frame_alignment_marker, FrameAlignmentMarker) \
V(Map, heap_number_map, HeapNumberMap) \ V(Map, heap_number_map, HeapNumberMap) \
V(Map, global_context_map, GlobalContextMap) \ V(Map, global_context_map, GlobalContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \ V(Map, fixed_array_map, FixedArrayMap) \
@ -126,9 +122,8 @@ inline Heap* _inline_get_heap_();
V(Map, shared_function_info_map, SharedFunctionInfoMap) \ V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, message_object_map, JSMessageObjectMap) \ V(Map, message_object_map, JSMessageObjectMap) \
V(Map, foreign_map, ForeignMap) \ V(Map, foreign_map, ForeignMap) \
V(HeapNumber, nan_value, NanValue) \ V(Object, nan_value, NanValue) \
V(HeapNumber, infinity_value, InfinityValue) \ V(Object, minus_zero_value, MinusZeroValue) \
V(HeapNumber, minus_zero_value, MinusZeroValue) \
V(Map, neander_map, NeanderMap) \ V(Map, neander_map, NeanderMap) \
V(JSObject, message_listeners, MessageListeners) \ V(JSObject, message_listeners, MessageListeners) \
V(Foreign, prototype_accessors, PrototypeAccessors) \ V(Foreign, prototype_accessors, PrototypeAccessors) \
@ -231,9 +226,7 @@ inline Heap* _inline_get_heap_();
V(closure_symbol, "(closure)") \ V(closure_symbol, "(closure)") \
V(use_strict, "use strict") \ V(use_strict, "use strict") \
V(dot_symbol, ".") \ V(dot_symbol, ".") \
V(anonymous_function_symbol, "(anonymous function)") \ V(anonymous_function_symbol, "(anonymous function)")
V(infinity_symbol, "Infinity") \
V(minus_infinity_symbol, "-Infinity")
// Forward declarations. // Forward declarations.
class GCTracer; class GCTracer;
@ -245,26 +238,10 @@ class WeakObjectRetainer;
typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap, typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
Object** pointer); Object** pointer);
class StoreBufferRebuilder { typedef bool (*DirtyRegionCallback)(Heap* heap,
public: Address start,
explicit StoreBufferRebuilder(StoreBuffer* store_buffer) Address end,
: store_buffer_(store_buffer) { ObjectSlotCallback copy_object_func);
}
void Callback(MemoryChunk* page, StoreBufferEvent event);
private:
StoreBuffer* store_buffer_;
// We record in this variable how full the store buffer was when we started
// iterating over the current page, finding pointers to new space. If the
// store buffer overflows again we can exempt the page from the store buffer
// by rewinding to this point instead of having to search the store buffer.
Object*** start_of_current_page_;
// The current page we are scanning in the store buffer iterator.
MemoryChunk* current_page_;
};
// The all static Heap captures the interface to the global object heap. // The all static Heap captures the interface to the global object heap.
@ -282,37 +259,22 @@ class PromotionQueue {
PromotionQueue() : front_(NULL), rear_(NULL) { } PromotionQueue() : front_(NULL), rear_(NULL) { }
void Initialize(Address start_address) { void Initialize(Address start_address) {
// Assumes that a NewSpacePage exactly fits a number of promotion queue
// entries (where each is a pair of intptr_t). This allows us to simplify
// the test fpr when to switch pages.
ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
== 0);
ASSERT(NewSpacePage::IsAtEnd(start_address));
front_ = rear_ = reinterpret_cast<intptr_t*>(start_address); front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
} }
bool is_empty() { return front_ == rear_; } bool is_empty() { return front_ <= rear_; }
inline void insert(HeapObject* target, int size); inline void insert(HeapObject* target, int size);
void remove(HeapObject** target, int* size) { void remove(HeapObject** target, int* size) {
ASSERT(!is_empty());
if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
NewSpacePage* front_page =
NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
ASSERT(!front_page->prev_page()->is_anchor());
front_ =
reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit());
}
*target = reinterpret_cast<HeapObject*>(*(--front_)); *target = reinterpret_cast<HeapObject*>(*(--front_));
*size = static_cast<int>(*(--front_)); *size = static_cast<int>(*(--front_));
// Assert no underflow. // Assert no underflow.
SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_), ASSERT(front_ >= rear_);
reinterpret_cast<Address>(front_));
} }
private: private:
// The front of the queue is higher in the memory page chain than the rear. // The front of the queue is higher in memory than the rear.
intptr_t* front_; intptr_t* front_;
intptr_t* rear_; intptr_t* rear_;
@ -320,11 +282,6 @@ class PromotionQueue {
}; };
typedef void (*ScavengingCallback)(Map* map,
HeapObject** slot,
HeapObject* object);
// External strings table is a place where all external strings are // External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly // registered. We need to keep track of such strings to properly
// finalize them. // finalize them.
@ -370,8 +327,8 @@ class Heap {
// Configure heap size before setup. Return false if the heap has been // Configure heap size before setup. Return false if the heap has been
// setup already. // setup already.
bool ConfigureHeap(int max_semispace_size, bool ConfigureHeap(int max_semispace_size,
intptr_t max_old_gen_size, int max_old_gen_size,
intptr_t max_executable_size); int max_executable_size);
bool ConfigureHeapDefault(); bool ConfigureHeapDefault();
// Initializes the global object heap. If create_heap_objects is true, // Initializes the global object heap. If create_heap_objects is true,
@ -499,7 +456,6 @@ class Heap {
// size, but keeping the original prototype. The receiver must have at least // size, but keeping the original prototype. The receiver must have at least
// the size of the new object. The object is reinitialized and behaves as an // the size of the new object. The object is reinitialized and behaves as an
// object that has been freshly allocated. // object that has been freshly allocated.
// Returns failure if an error occured, otherwise object.
MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object, MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object,
InstanceType type, InstanceType type,
int size); int size);
@ -528,10 +484,8 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. // failed.
// Please note this function does not perform a garbage collection. // Please note this function does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateMap( MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type,
InstanceType instance_type, int instance_size);
int instance_size,
ElementsKind elements_kind = FAST_ELEMENTS);
// Allocates a partial map for bootstrapping. // Allocates a partial map for bootstrapping.
MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type, MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@ -842,9 +796,9 @@ class Heap {
// failed. // failed.
// Please note this does not perform a garbage collection. // Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii( MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
const ExternalAsciiString::Resource* resource); ExternalAsciiString::Resource* resource);
MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte( MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource); ExternalTwoByteString::Resource* resource);
// Finalizes an external string by deleting the associated external // Finalizes an external string by deleting the associated external
// data and clearing the resource pointer. // data and clearing the resource pointer.
@ -931,24 +885,13 @@ class Heap {
// collect more garbage. // collect more garbage.
inline bool CollectGarbage(AllocationSpace space); inline bool CollectGarbage(AllocationSpace space);
static const int kNoGCFlags = 0; // Performs a full garbage collection. Force compaction if the
static const int kMakeHeapIterableMask = 1; // parameter is true.
void CollectAllGarbage(bool force_compaction);
// Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
// non-zero, then the slower precise sweeper is used, which leaves the heap
// in a state where we can iterate over the heap visiting all objects.
void CollectAllGarbage(int flags);
// Last hope GC, should try to squeeze as much as possible. // Last hope GC, should try to squeeze as much as possible.
void CollectAllAvailableGarbage(); void CollectAllAvailableGarbage();
// Check whether the heap is currently iterable.
bool IsHeapIterable();
// Ensure that we have swept all spaces in such a way that we can iterate
// over all objects. May cause a GC.
void EnsureHeapIsIterable();
// Notify the heap that a context has been disposed. // Notify the heap that a context has been disposed.
int NotifyContextDisposed() { return ++contexts_disposed_; } int NotifyContextDisposed() { return ++contexts_disposed_; }
@ -956,20 +899,6 @@ class Heap {
// ensure correct callback for weak global handles. // ensure correct callback for weak global handles.
void PerformScavenge(); void PerformScavenge();
inline void increment_scan_on_scavenge_pages() {
scan_on_scavenge_pages_++;
if (FLAG_gc_verbose) {
PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
}
}
inline void decrement_scan_on_scavenge_pages() {
scan_on_scavenge_pages_--;
if (FLAG_gc_verbose) {
PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
}
}
PromotionQueue* promotion_queue() { return &promotion_queue_; } PromotionQueue* promotion_queue() { return &promotion_queue_; }
#ifdef DEBUG #ifdef DEBUG
@ -996,8 +925,6 @@ class Heap {
// Heap root getters. We have versions with and without type::cast() here. // Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails. // You can't use type::cast during GC because the assert fails.
// TODO(1490): Try removing the unchecked accessors, now that GC marking does
// not corrupt the stack.
#define ROOT_ACCESSOR(type, name, camel_name) \ #define ROOT_ACCESSOR(type, name, camel_name) \
type* name() { \ type* name() { \
return type::cast(roots_[k##camel_name##RootIndex]); \ return type::cast(roots_[k##camel_name##RootIndex]); \
@ -1038,16 +965,60 @@ class Heap {
// Iterates over all the other roots in the heap. // Iterates over all the other roots in the heap.
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode); void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
enum ExpectedPageWatermarkState {
WATERMARK_SHOULD_BE_VALID,
WATERMARK_CAN_BE_INVALID
};
// For each dirty region on a page in use from an old space call
// visit_dirty_region callback.
// If either visit_dirty_region or callback can cause an allocation
// in old space and changes in allocation watermark then
// can_preallocate_during_iteration should be set to true.
// All pages will be marked as having invalid watermark upon
// iteration completion.
void IterateDirtyRegions(
PagedSpace* space,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback,
ExpectedPageWatermarkState expected_page_watermark_state);
// Interpret marks as a bitvector of dirty marks for regions of size
// Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
// memory interval from start to top. For each dirty region call a
// visit_dirty_region callback. Return updated bitvector of dirty marks.
uint32_t IterateDirtyRegions(uint32_t marks,
Address start,
Address end,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback callback);
// Iterate pointers to from semispace of new space found in memory interval // Iterate pointers to from semispace of new space found in memory interval
// from start to end. // from start to end.
// Update dirty marks for page containing start address.
void IterateAndMarkPointersToFromSpace(Address start, void IterateAndMarkPointersToFromSpace(Address start,
Address end, Address end,
ObjectSlotCallback callback); ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// Return true if pointers to new space was found.
static bool IteratePointersInDirtyRegion(Heap* heap,
Address start,
Address end,
ObjectSlotCallback callback);
// Iterate pointers to new space found in memory interval from start to end.
// This interval is considered to belong to the map space.
// Return true if pointers to new space was found.
static bool IteratePointersInDirtyMapsRegion(Heap* heap,
Address start,
Address end,
ObjectSlotCallback callback);
// Returns whether the object resides in new space. // Returns whether the object resides in new space.
inline bool InNewSpace(Object* object); inline bool InNewSpace(Object* object);
inline bool InNewSpace(Address addr);
inline bool InNewSpacePage(Address addr);
inline bool InFromSpace(Object* object); inline bool InFromSpace(Object* object);
inline bool InToSpace(Object* object); inline bool InToSpace(Object* object);
@ -1086,20 +1057,12 @@ class Heap {
roots_[kEmptyScriptRootIndex] = script; roots_[kEmptyScriptRootIndex] = script;
} }
void public_set_store_buffer_top(Address* top) {
roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
}
// Update the next script id. // Update the next script id.
inline void SetLastScriptId(Object* last_script_id); inline void SetLastScriptId(Object* last_script_id);
// Generated code can embed this address to get access to the roots. // Generated code can embed this address to get access to the roots.
Object** roots_address() { return roots_; } Object** roots_address() { return roots_; }
Address* store_buffer_top_address() {
return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
}
// Get address of global contexts list for serialization support. // Get address of global contexts list for serialization support.
Object** global_contexts_list_address() { Object** global_contexts_list_address() {
return &global_contexts_list_; return &global_contexts_list_;
@ -1112,10 +1075,6 @@ class Heap {
// Verify the heap is in its normal state before or after a GC. // Verify the heap is in its normal state before or after a GC.
void Verify(); void Verify();
void OldPointerSpaceCheckStoreBuffer();
void MapSpaceCheckStoreBuffer();
void LargeObjectSpaceCheckStoreBuffer();
// Report heap statistics. // Report heap statistics.
void ReportHeapStatistics(const char* title); void ReportHeapStatistics(const char* title);
void ReportCodeStatistics(const char* title); void ReportCodeStatistics(const char* title);
@ -1211,51 +1170,22 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length, MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
PretenureFlag pretenure); PretenureFlag pretenure);
inline intptr_t PromotedTotalSize() {
return PromotedSpaceSize() + PromotedExternalMemorySize();
}
// True if we have reached the allocation limit in the old generation that // True if we have reached the allocation limit in the old generation that
// should force the next GC (caused normally) to be a full one. // should force the next GC (caused normally) to be a full one.
inline bool OldGenerationPromotionLimitReached() { bool OldGenerationPromotionLimitReached() {
return PromotedTotalSize() > old_gen_promotion_limit_; return (PromotedSpaceSize() + PromotedExternalMemorySize())
} > old_gen_promotion_limit_;
inline intptr_t OldGenerationSpaceAvailable() {
return old_gen_allocation_limit_ - PromotedTotalSize();
} }
static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize; intptr_t OldGenerationSpaceAvailable() {
static const intptr_t kMinimumAllocationLimit = return old_gen_allocation_limit_ -
8 * (Page::kPageSize > MB ? Page::kPageSize : MB); (PromotedSpaceSize() + PromotedExternalMemorySize());
// When we sweep lazily we initially guess that there is no garbage on the
// heap and set the limits for the next GC accordingly. As we sweep we find
// out that some of the pages contained garbage and we have to adjust
// downwards the size of the heap. This means the limits that control the
// timing of the next GC also need to be adjusted downwards.
void LowerOldGenLimits(intptr_t adjustment) {
size_of_old_gen_at_last_old_space_gc_ -= adjustment;
old_gen_promotion_limit_ =
OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
old_gen_allocation_limit_ =
OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
} }
intptr_t OldGenPromotionLimit(intptr_t old_gen_size) { // True if we have reached the allocation limit in the old generation that
intptr_t limit = // should artificially cause a GC right now.
Max(old_gen_size + old_gen_size / 3, kMinimumPromotionLimit); bool OldGenerationAllocationLimitReached() {
limit += new_space_.Capacity(); return OldGenerationSpaceAvailable() < 0;
limit *= old_gen_limit_factor_;
return limit;
}
intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
intptr_t limit =
Max(old_gen_size + old_gen_size / 2, kMinimumAllocationLimit);
limit += new_space_.Capacity();
limit *= old_gen_limit_factor_;
return limit;
} }
// Can be called when the embedding application is idle. // Can be called when the embedding application is idle.
@ -1283,8 +1213,6 @@ class Heap {
MUST_USE_RESULT MaybeObject* NumberToString( MUST_USE_RESULT MaybeObject* NumberToString(
Object* number, bool check_number_string_cache = true); Object* number, bool check_number_string_cache = true);
MUST_USE_RESULT MaybeObject* Uint32ToString(
uint32_t value, bool check_number_string_cache = true);
Map* MapForExternalArrayType(ExternalArrayType array_type); Map* MapForExternalArrayType(ExternalArrayType array_type);
RootListIndex RootIndexForExternalArrayType( RootListIndex RootIndexForExternalArrayType(
@ -1296,10 +1224,18 @@ class Heap {
// by pointer size. // by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size); static inline void CopyBlock(Address dst, Address src, int byte_size);
inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size);
// Optimized version of memmove for blocks with pointer size aligned sizes and // Optimized version of memmove for blocks with pointer size aligned sizes and
// pointer size aligned addresses. // pointer size aligned addresses.
static inline void MoveBlock(Address dst, Address src, int byte_size); static inline void MoveBlock(Address dst, Address src, int byte_size);
inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
Address src,
int byte_size);
// Check new space expansion criteria and expand semispaces if it was hit. // Check new space expansion criteria and expand semispaces if it was hit.
void CheckNewSpaceExpansionCriteria(); void CheckNewSpaceExpansionCriteria();
@ -1308,31 +1244,9 @@ class Heap {
survived_since_last_expansion_ += survived; survived_since_last_expansion_ += survived;
} }
inline bool NextGCIsLikelyToBeFull() {
if (FLAG_gc_global) return true;
intptr_t total_promoted = PromotedTotalSize();
intptr_t adjusted_promotion_limit =
old_gen_promotion_limit_ - new_space_.Capacity();
if (total_promoted >= adjusted_promotion_limit) return true;
intptr_t adjusted_allocation_limit =
old_gen_allocation_limit_ - new_space_.Capacity() / 5;
if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
return false;
}
void UpdateNewSpaceReferencesInExternalStringTable( void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func); ExternalStringTableUpdaterCallback updater_func);
void UpdateReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
void ProcessWeakReferences(WeakObjectRetainer* retainer); void ProcessWeakReferences(WeakObjectRetainer* retainer);
// Helper function that governs the promotion policy from new space to // Helper function that governs the promotion policy from new space to
@ -1349,9 +1263,6 @@ class Heap {
GCTracer* tracer() { return tracer_; } GCTracer* tracer() { return tracer_; }
// Returns the size of objects residing in non new spaces.
intptr_t PromotedSpaceSize();
double total_regexp_code_generated() { return total_regexp_code_generated_; } double total_regexp_code_generated() { return total_regexp_code_generated_; }
void IncreaseTotalRegexpCodeGenerated(int size) { void IncreaseTotalRegexpCodeGenerated(int size) {
total_regexp_code_generated_ += size; total_regexp_code_generated_ += size;
@ -1370,18 +1281,6 @@ class Heap {
return &mark_compact_collector_; return &mark_compact_collector_;
} }
StoreBuffer* store_buffer() {
return &store_buffer_;
}
Marking* marking() {
return &marking_;
}
IncrementalMarking* incremental_marking() {
return &incremental_marking_;
}
ExternalStringTable* external_string_table() { ExternalStringTable* external_string_table() {
return &external_string_table_; return &external_string_table_;
} }
@ -1392,28 +1291,16 @@ class Heap {
} }
inline Isolate* isolate(); inline Isolate* isolate();
bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
inline void CallGlobalGCPrologueCallback() { void CallGlobalGCPrologueCallback() {
if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_(); if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
} }
inline void CallGlobalGCEpilogueCallback() { void CallGlobalGCEpilogueCallback() {
if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_(); if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
} }
inline bool OldGenerationAllocationLimitReached();
inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
}
void QueueMemoryChunkForFree(MemoryChunk* chunk);
void FreeQueuedChunks();
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
inline void CompletelyClearInstanceofCache();
private: private:
Heap(); Heap();
@ -1421,12 +1308,12 @@ class Heap {
// more expedient to get at the isolate directly from within Heap methods. // more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_; Isolate* isolate_;
intptr_t code_range_size_;
int reserved_semispace_size_; int reserved_semispace_size_;
int max_semispace_size_; int max_semispace_size_;
int initial_semispace_size_; int initial_semispace_size_;
intptr_t max_old_generation_size_; intptr_t max_old_generation_size_;
intptr_t max_executable_size_; intptr_t max_executable_size_;
intptr_t code_range_size_;
// For keeping track of how much data has survived // For keeping track of how much data has survived
// scavenge since last new space expansion. // scavenge since last new space expansion.
@ -1441,8 +1328,6 @@ class Heap {
// For keeping track of context disposals. // For keeping track of context disposals.
int contexts_disposed_; int contexts_disposed_;
int scan_on_scavenge_pages_;
#if defined(V8_TARGET_ARCH_X64) #if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 1024*KB; static const int kMaxObjectSizeInNewSpace = 1024*KB;
#else #else
@ -1459,9 +1344,13 @@ class Heap {
HeapState gc_state_; HeapState gc_state_;
int gc_post_processing_depth_; int gc_post_processing_depth_;
// Returns the size of object residing in non new spaces.
intptr_t PromotedSpaceSize();
// Returns the amount of external memory registered since last global gc. // Returns the amount of external memory registered since last global gc.
int PromotedExternalMemorySize(); int PromotedExternalMemorySize();
int mc_count_; // how many mark-compact collections happened
int ms_count_; // how many mark-sweep collections happened int ms_count_; // how many mark-sweep collections happened
unsigned int gc_count_; // how many gc happened unsigned int gc_count_; // how many gc happened
@ -1500,13 +1389,6 @@ class Heap {
// every allocation in large object space. // every allocation in large object space.
intptr_t old_gen_allocation_limit_; intptr_t old_gen_allocation_limit_;
// Sometimes the heuristics dictate that those limits are increased. This
// variable records that fact.
int old_gen_limit_factor_;
// Used to adjust the limits that control the timing of the next GC.
intptr_t size_of_old_gen_at_last_old_space_gc_;
// Limit on the amount of externally allocated memory allowed // Limit on the amount of externally allocated memory allowed
// between global GCs. If reached a global GC is forced. // between global GCs. If reached a global GC is forced.
intptr_t external_allocation_limit_; intptr_t external_allocation_limit_;
@ -1526,8 +1408,6 @@ class Heap {
Object* global_contexts_list_; Object* global_contexts_list_;
StoreBufferRebuilder store_buffer_rebuilder_;
struct StringTypeTable { struct StringTypeTable {
InstanceType type; InstanceType type;
int size; int size;
@ -1585,11 +1465,13 @@ class Heap {
// Support for computing object sizes during GC. // Support for computing object sizes during GC.
HeapObjectCallback gc_safe_size_of_old_object_; HeapObjectCallback gc_safe_size_of_old_object_;
static int GcSafeSizeOfOldObject(HeapObject* object); static int GcSafeSizeOfOldObject(HeapObject* object);
static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
// Update the GC state. Called from the mark-compact collector. // Update the GC state. Called from the mark-compact collector.
void MarkMapPointersAsEncoded(bool encoded) { void MarkMapPointersAsEncoded(bool encoded) {
ASSERT(!encoded); gc_safe_size_of_old_object_ = encoded
gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject; ? &GcSafeSizeOfOldObjectWithEncodedMap
: &GcSafeSizeOfOldObject;
} }
// Checks whether a global GC is necessary // Checks whether a global GC is necessary
@ -1601,10 +1483,11 @@ class Heap {
bool PerformGarbageCollection(GarbageCollector collector, bool PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer); GCTracer* tracer);
static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;
inline void UpdateOldSpaceLimits(); inline void UpdateOldSpaceLimits();
// Allocate an uninitialized object in map space. The behavior is identical // Allocate an uninitialized object in map space. The behavior is identical
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size // have to test the allocation space argument and (b) can reduce code size
@ -1639,6 +1522,8 @@ class Heap {
// Allocate empty fixed double array. // Allocate empty fixed double array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray(); MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
// Performs a minor collection in new generation. // Performs a minor collection in new generation.
void Scavenge(); void Scavenge();
@ -1647,15 +1532,16 @@ class Heap {
Object** pointer); Object** pointer);
Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
static void ScavengeStoreBufferCallback(Heap* heap,
MemoryChunk* page,
StoreBufferEvent event);
// Performs a major collection in the whole heap. // Performs a major collection in the whole heap.
void MarkCompact(GCTracer* tracer); void MarkCompact(GCTracer* tracer);
// Code to be run before and after mark-compact. // Code to be run before and after mark-compact.
void MarkCompactPrologue(); void MarkCompactPrologue(bool is_compacting);
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
inline void CompletelyClearInstanceofCache();
// Record statistics before and after garbage collection. // Record statistics before and after garbage collection.
void ReportStatisticsBeforeGC(); void ReportStatisticsBeforeGC();
@ -1665,11 +1551,12 @@ class Heap {
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
// Initializes a function with a shared part and prototype. // Initializes a function with a shared part and prototype.
// Returns the function.
// Note: this code was factored out of AllocateFunction such that // Note: this code was factored out of AllocateFunction such that
// other parts of the VM could use it. Specifically, a function that creates // other parts of the VM could use it. Specifically, a function that creates
// instances of type JS_FUNCTION_TYPE benefit from the use of this function. // instances of type JS_FUNCTION_TYPE benefit from the use of this function.
// Please note this does not perform a garbage collection. // Please note this does not perform a garbage collection.
inline void InitializeFunction( MUST_USE_RESULT inline MaybeObject* InitializeFunction(
JSFunction* function, JSFunction* function,
SharedFunctionInfo* shared, SharedFunctionInfo* shared,
Object* prototype); Object* prototype);
@ -1734,8 +1621,6 @@ class Heap {
return high_survival_rate_period_length_ > 0; return high_survival_rate_period_length_ > 0;
} }
void SelectScavengingVisitorsTable();
static const int kInitialSymbolTableSize = 2048; static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64; static const int kInitialEvalCacheSize = 64;
@ -1755,11 +1640,10 @@ class Heap {
MarkCompactCollector mark_compact_collector_; MarkCompactCollector mark_compact_collector_;
StoreBuffer store_buffer_; // This field contains the meaning of the WATERMARK_INVALIDATED flag.
// Instead of clearing this flag from all pages we just flip
Marking marking_; // its meaning at the beginning of a scavenge.
intptr_t page_watermark_invalidated_mark_;
IncrementalMarking incremental_marking_;
int number_idle_notifications_; int number_idle_notifications_;
unsigned int last_idle_notification_gc_count_; unsigned int last_idle_notification_gc_count_;
@ -1774,9 +1658,7 @@ class Heap {
ExternalStringTable external_string_table_; ExternalStringTable external_string_table_;
VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_; bool is_safe_to_read_maps_;
MemoryChunk* chunks_queued_for_free_;
friend class Factory; friend class Factory;
friend class GCTracer; friend class GCTracer;
@ -1875,6 +1757,29 @@ class VerifyPointersVisitor: public ObjectVisitor {
} }
} }
}; };
// Visitor class to verify interior pointers in spaces that use region marks
// to keep track of intergenerational references.
// As VerifyPointersVisitor but also checks that dirty marks are set
// for regions covering intergenerational references.
class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
ASSERT(HEAP->Contains(object));
ASSERT(object->map()->IsMap());
if (HEAP->InNewSpace(object)) {
ASSERT(HEAP->InToSpace(object));
Address addr = reinterpret_cast<Address>(current);
ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
}
}
}
}
};
#endif #endif
@ -2207,6 +2112,16 @@ class GCTracer BASE_EMBEDDED {
// Sets the full GC count. // Sets the full GC count.
void set_full_gc_count(int count) { full_gc_count_ = count; } void set_full_gc_count(int count) { full_gc_count_ = count; }
// Sets the flag that this is a compacting full GC.
void set_is_compacting() { is_compacting_ = true; }
bool is_compacting() const { return is_compacting_; }
// Increment and decrement the count of marked objects.
void increment_marked_count() { ++marked_count_; }
void decrement_marked_count() { --marked_count_; }
int marked_count() { return marked_count_; }
void increment_promoted_objects_size(int object_size) { void increment_promoted_objects_size(int object_size) {
promoted_objects_size_ += object_size; promoted_objects_size_ += object_size;
} }
@ -2231,6 +2146,23 @@ class GCTracer BASE_EMBEDDED {
// A count (including this one) of the number of full garbage collections. // A count (including this one) of the number of full garbage collections.
int full_gc_count_; int full_gc_count_;
// True if the current GC is a compacting full collection, false
// otherwise.
bool is_compacting_;
// True if the *previous* full GC was a compacting collection (will be
// false if there has not been a previous full GC).
bool previous_has_compacted_;
// On a full GC, a count of the number of marked objects. Incremented
// when an object is marked and decremented when an object's mark bit is
// cleared. Will be zero on a scavenge collection.
int marked_count_;
// The count from the end of the previous full GC. Will be zero if there
// was no previous full GC.
int previous_marked_count_;
// Amounts of time spent in different scopes during GC. // Amounts of time spent in different scopes during GC.
double scopes_[Scope::kNumberOfScopes]; double scopes_[Scope::kNumberOfScopes];
@ -2249,13 +2181,6 @@ class GCTracer BASE_EMBEDDED {
// Size of objects promoted during the current collection. // Size of objects promoted during the current collection.
intptr_t promoted_objects_size_; intptr_t promoted_objects_size_;
// Incremental marking steps counters.
int steps_count_;
double steps_took_;
double longest_step_;
int steps_count_since_last_gc_;
double steps_took_since_last_gc_;
Heap* heap_; Heap* heap_;
}; };
@ -2367,46 +2292,6 @@ class WeakObjectRetainer {
}; };
// Intrusive object marking uses least significant bit of
// heap object's map word to mark objects.
// Normally all map words have least significant bit set
// because they contain tagged map pointer.
// If the bit is not set object is marked.
// All objects should be unmarked before resuming
// JavaScript execution.
class IntrusiveMarking {
public:
static bool IsMarked(HeapObject* object) {
return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
}
static void ClearMark(HeapObject* object) {
uintptr_t map_word = object->map_word().ToRawValue();
object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
ASSERT(!IsMarked(object));
}
static void SetMark(HeapObject* object) {
uintptr_t map_word = object->map_word().ToRawValue();
object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
ASSERT(IsMarked(object));
}
static Map* MapOfMarkedObject(HeapObject* object) {
uintptr_t map_word = object->map_word().ToRawValue();
return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
}
static int SizeOfMarkedObject(HeapObject* object) {
return object->SizeFromMap(MapOfMarkedObject(object));
}
private:
static const uintptr_t kNotMarkedBit = 0x1;
STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
};
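The IntrusiveMarking helper removed above relies on pointer tagging: a map word normally holds a tagged Map pointer, so its least significant bit is set, and clearing that bit is enough to mark an object without any side table. A minimal standalone sketch of the same trick on plain integers (illustrative only, not V8 code):

#include <cassert>
#include <cstdint>

static const uintptr_t kNotMarkedBit = 0x1;  // doubles as the heap-object tag bit

static bool IsMarked(uintptr_t map_word) { return (map_word & kNotMarkedBit) == 0; }
static uintptr_t SetMark(uintptr_t map_word) { return map_word & ~kNotMarkedBit; }
static uintptr_t ClearMark(uintptr_t map_word) { return map_word | kNotMarkedBit; }

int main() {
  uintptr_t map_word = 0x1000 | kNotMarkedBit;  // a tagged "map pointer"
  assert(!IsMarked(map_word));
  map_word = SetMark(map_word);    // clearing the tag bit marks the object
  assert(IsMarked(map_word));
  map_word = ClearMark(map_word);  // restore the tag before resuming JavaScript
  assert(!IsMarked(map_word));
  return 0;
}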
#if defined(DEBUG) || defined(LIVE_OBJECT_LIST) #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
// Helper class for tracing paths to a search target Object from all roots. // Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific // The TracePathFrom() method can be used to trace paths from a specific
@ -2465,6 +2350,7 @@ class PathTracer : public ObjectVisitor {
}; };
#endif // DEBUG || LIVE_OBJECT_LIST #endif // DEBUG || LIVE_OBJECT_LIST
} } // namespace v8::internal } } // namespace v8::internal
#undef HEAP #undef HEAP

66
deps/v8/src/hydrogen-instructions.cc

@ -707,14 +707,6 @@ void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
} }
void HIsNilAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(kind() == kStrictEquality ? " === " : " == ");
stream->Add(nil() == kNullValue ? "null" : "undefined");
HControlInstruction::PrintDataTo(stream);
}
void HReturn::PrintDataTo(StringStream* stream) { void HReturn::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream); value()->PrintNameTo(stream);
} }
@ -785,22 +777,15 @@ void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream); value()->PrintNameTo(stream);
stream->Add(" == "); stream->Add(" == ");
stream->Add(type_literal_->GetFlatContent().ToAsciiVector()); stream->Add(type_literal_->GetFlatContent().ToAsciiVector());
HControlInstruction::PrintDataTo(stream);
}
void HTypeof::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
} }
void HChange::PrintDataTo(StringStream* stream) { void HChange::PrintDataTo(StringStream* stream) {
HUnaryOperation::PrintDataTo(stream); HUnaryOperation::PrintDataTo(stream);
stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic()); stream->Add(" %s to %s", from_.Mnemonic(), to().Mnemonic());
if (CanTruncateToInt32()) stream->Add(" truncating-int32"); if (CanTruncateToInt32()) stream->Add(" truncating-int32");
if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?"); if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
if (CheckFlag(kDeoptimizeOnUndefined)) stream->Add(" deopt-on-undefined");
} }
@ -872,23 +857,6 @@ void HCheckFunction::PrintDataTo(StringStream* stream) {
} }
const char* HCheckInstanceType::GetCheckName() {
switch (check_) {
case IS_SPEC_OBJECT: return "object";
case IS_JS_ARRAY: return "array";
case IS_STRING: return "string";
case IS_SYMBOL: return "symbol";
}
UNREACHABLE();
return "";
}
void HCheckInstanceType::PrintDataTo(StringStream* stream) {
stream->Add("%s ", GetCheckName());
HUnaryOperation::PrintDataTo(stream);
}
void HCallStub::PrintDataTo(StringStream* stream) { void HCallStub::PrintDataTo(StringStream* stream) {
stream->Add("%s ", stream->Add("%s ",
CodeStub::MajorName(major_key_, false)); CodeStub::MajorName(major_key_, false));
@ -1343,14 +1311,6 @@ void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
} }
void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
left()->PrintNameTo(stream);
stream->Add(" ");
right()->PrintNameTo(stream);
HControlInstruction::PrintDataTo(stream);
}
void HGoto::PrintDataTo(StringStream* stream) { void HGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", SuccessorAt(0)->block_id()); stream->Add("B%d", SuccessorAt(0)->block_id());
} }
@ -1465,7 +1425,7 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
} }
bool HLoadKeyedFastElement::RequiresHoleCheck() { bool HLoadKeyedFastElement::RequiresHoleCheck() const {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) { for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value(); HValue* use = it.value();
if (!use->IsChange()) return true; if (!use->IsChange()) return true;
@ -1482,6 +1442,11 @@ void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
} }
bool HLoadKeyedFastDoubleElement::RequiresHoleCheck() const {
return true;
}
void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) { void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream); object()->PrintNameTo(stream);
stream->Add("["); stream->Add("[");
@ -1523,7 +1488,6 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo(
stream->Add("pixel"); stream->Add("pixel");
break; break;
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS:
@ -1618,7 +1582,6 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
case EXTERNAL_PIXEL_ELEMENTS: case EXTERNAL_PIXEL_ELEMENTS:
stream->Add("pixel"); stream->Add("pixel");
break; break;
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
@ -1635,18 +1598,7 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
void HLoadGlobalCell::PrintDataTo(StringStream* stream) { void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
stream->Add("[%p]", *cell()); stream->Add("[%p]", *cell());
if (!details_.IsDontDelete()) stream->Add(" (deleteable)"); if (check_hole_value()) stream->Add(" (deleteable/read-only)");
if (details_.IsReadOnly()) stream->Add(" (read-only)");
}
bool HLoadGlobalCell::RequiresHoleCheck() {
if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (!use->IsChange()) return true;
}
return false;
} }
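Both sides of this hunk encode the same rule for when a load from a global property cell must check for the hole value; the 3.6.6 side derives it from PropertyDetails, the restored side from a precomputed check_hole_value() flag. A hedged sketch of that rule (the names below are illustrative, not V8 API):

struct CellDetails {
  bool is_dont_delete;  // property cannot be deleted
  bool is_read_only;    // property cannot be overwritten
};

// A cell that can never be deleted and is writable always holds a real value,
// so no hole check is needed; otherwise the check is only required when some
// use observes the raw value (uses that merely convert it are harmless).
static bool NeedsHoleCheck(const CellDetails& details, bool has_observing_use) {
  if (details.is_dont_delete && !details.is_read_only) return false;
  return has_observing_use;
}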
@ -1658,8 +1610,6 @@ void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
void HStoreGlobalCell::PrintDataTo(StringStream* stream) { void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
stream->Add("[%p] = ", *cell()); stream->Add("[%p] = ", *cell());
value()->PrintNameTo(stream); value()->PrintNameTo(stream);
if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
if (details_.IsReadOnly()) stream->Add(" (read-only)");
} }

344
deps/v8/src/hydrogen-instructions.h

File diff suppressed because it is too large

408
deps/v8/src/hydrogen.cc

@ -422,7 +422,7 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
}; };
void HGraph::Verify(bool do_full_verify) const { void HGraph::Verify() const {
for (int i = 0; i < blocks_.length(); i++) { for (int i = 0; i < blocks_.length(); i++) {
HBasicBlock* block = blocks_.at(i); HBasicBlock* block = blocks_.at(i);
@ -473,27 +473,25 @@ void HGraph::Verify(bool do_full_verify) const {
// Check special property of first block to have no predecessors. // Check special property of first block to have no predecessors.
ASSERT(blocks_.at(0)->predecessors()->is_empty()); ASSERT(blocks_.at(0)->predecessors()->is_empty());
if (do_full_verify) { // Check that the graph is fully connected.
// Check that the graph is fully connected. ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL); ASSERT(analyzer.visited_count() == blocks_.length());
ASSERT(analyzer.visited_count() == blocks_.length());
// Check that entry block dominator is NULL. // Check that entry block dominator is NULL.
ASSERT(entry_block_->dominator() == NULL); ASSERT(entry_block_->dominator() == NULL);
// Check dominators. // Check dominators.
for (int i = 0; i < blocks_.length(); ++i) { for (int i = 0; i < blocks_.length(); ++i) {
HBasicBlock* block = blocks_.at(i); HBasicBlock* block = blocks_.at(i);
if (block->dominator() == NULL) { if (block->dominator() == NULL) {
// Only start block may have no dominator assigned to. // Only start block may have no dominator assigned to.
ASSERT(i == 0); ASSERT(i == 0);
} else { } else {
// Assert that block is unreachable if dominator must not be visited. // Assert that block is unreachable if dominator must not be visited.
ReachabilityAnalyzer dominator_analyzer(entry_block_, ReachabilityAnalyzer dominator_analyzer(entry_block_,
blocks_.length(), blocks_.length(),
block->dominator()); block->dominator());
ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id())); ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
}
} }
} }
} }
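The full-connectivity assertion above boils down to a worklist reachability pass over the basic-block graph: start at the entry block and require that every block gets visited. A small self-contained sketch of that idea, using a toy Block type rather than HBasicBlock:

#include <cstddef>
#include <vector>

struct Block {
  int id;
  std::vector<Block*> successors;
};

static std::size_t CountReachable(Block* entry, std::size_t block_count) {
  std::vector<bool> visited(block_count, false);
  std::vector<Block*> worklist;
  worklist.push_back(entry);
  std::size_t reached = 0;
  while (!worklist.empty()) {
    Block* block = worklist.back();
    worklist.pop_back();
    if (visited[block->id]) continue;
    visited[block->id] = true;
    ++reached;
    for (Block* s : block->successors) worklist.push_back(s);
  }
  return reached;  // the verifier asserts this equals block_count
}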
@ -852,7 +850,7 @@ void HGraph::EliminateUnreachablePhis() {
} }
bool HGraph::CheckArgumentsPhiUses() { bool HGraph::CheckPhis() {
int block_count = blocks_.length(); int block_count = blocks_.length();
for (int i = 0; i < block_count; ++i) { for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) { for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
@ -865,11 +863,13 @@ bool HGraph::CheckArgumentsPhiUses() {
} }
bool HGraph::CheckConstPhiUses() { bool HGraph::CollectPhis() {
int block_count = blocks_.length(); int block_count = blocks_.length();
phi_list_ = new ZoneList<HPhi*>(block_count);
for (int i = 0; i < block_count; ++i) { for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) { for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
HPhi* phi = blocks_[i]->phis()->at(j); HPhi* phi = blocks_[i]->phis()->at(j);
phi_list_->Add(phi);
// Check for the hole value (from an uninitialized const). // Check for the hole value (from an uninitialized const).
for (int k = 0; k < phi->OperandCount(); k++) { for (int k = 0; k < phi->OperandCount(); k++) {
if (phi->OperandAt(k) == GetConstantHole()) return false; if (phi->OperandAt(k) == GetConstantHole()) return false;
@ -880,18 +880,6 @@ bool HGraph::CheckConstPhiUses() {
} }
void HGraph::CollectPhis() {
int block_count = blocks_.length();
phi_list_ = new ZoneList<HPhi*>(block_count);
for (int i = 0; i < block_count; ++i) {
for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
HPhi* phi = blocks_[i]->phis()->at(j);
phi_list_->Add(phi);
}
}
}
void HGraph::InferTypes(ZoneList<HValue*>* worklist) { void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
BitVector in_worklist(GetMaximumValueID()); BitVector in_worklist(GetMaximumValueID());
for (int i = 0; i < worklist->length(); ++i) { for (int i = 0; i < worklist->length(); ++i) {
@ -1860,7 +1848,7 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
} }
if (new_value == NULL) { if (new_value == NULL) {
new_value = new(zone()) HChange(value, to, new_value = new(zone()) HChange(value, value->representation(), to,
is_truncating, deoptimize_on_undefined); is_truncating, deoptimize_on_undefined);
} }
@ -2332,24 +2320,17 @@ HGraph* HGraphBuilder::CreateGraph() {
graph()->OrderBlocks(); graph()->OrderBlocks();
graph()->AssignDominators(); graph()->AssignDominators();
#ifdef DEBUG
// Do a full verify after building the graph and computing dominators.
graph()->Verify(true);
#endif
graph()->PropagateDeoptimizingMark(); graph()->PropagateDeoptimizingMark();
if (!graph()->CheckConstPhiUses()) {
Bailout("Unsupported phi use of const variable");
return NULL;
}
graph()->EliminateRedundantPhis(); graph()->EliminateRedundantPhis();
if (!graph()->CheckArgumentsPhiUses()) { if (!graph()->CheckPhis()) {
Bailout("Unsupported phi use of arguments"); Bailout("Unsupported phi use of arguments object");
return NULL; return NULL;
} }
if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis(); if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
graph()->CollectPhis(); if (!graph()->CollectPhis()) {
Bailout("Unsupported phi use of uninitialized constant");
return NULL;
}
HInferRepresentation rep(graph()); HInferRepresentation rep(graph());
rep.Analyze(); rep.Analyze();
@ -3146,16 +3127,6 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
} }
switch (variable->location()) { switch (variable->location()) {
case Variable::UNALLOCATED: { case Variable::UNALLOCATED: {
// Handle known global constants like 'undefined' specially to avoid a
// load from a global cell for them.
Handle<Object> constant_value =
isolate()->factory()->GlobalConstantFor(variable->name());
if (!constant_value.is_null()) {
HConstant* instr =
new(zone()) HConstant(constant_value, Representation::Tagged());
return ast_context()->ReturnInstruction(instr, expr->id());
}
LookupResult lookup; LookupResult lookup;
GlobalPropertyAccess type = GlobalPropertyAccess type =
LookupGlobalProperty(variable, &lookup, false); LookupGlobalProperty(variable, &lookup, false);
@ -3168,8 +3139,8 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
if (type == kUseCell) { if (type == kUseCell) {
Handle<GlobalObject> global(info()->global_object()); Handle<GlobalObject> global(info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup)); Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
HLoadGlobalCell* instr = bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails()); HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
return ast_context()->ReturnInstruction(instr, expr->id()); return ast_context()->ReturnInstruction(instr, expr->id());
} else { } else {
HValue* context = environment()->LookupContext(); HValue* context = environment()->LookupContext();
@ -3346,43 +3317,7 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* key = AddInstruction( HValue* key = AddInstruction(
new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)), new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
Representation::Integer32())); Representation::Integer32()));
HInstruction* elements_kind =
AddInstruction(new(zone()) HElementsKind(literal));
HBasicBlock* store_fast = graph()->CreateBasicBlock();
// Two empty blocks to satisfy edge split form.
HBasicBlock* store_fast_edgesplit1 = graph()->CreateBasicBlock();
HBasicBlock* store_fast_edgesplit2 = graph()->CreateBasicBlock();
HBasicBlock* store_generic = graph()->CreateBasicBlock();
HBasicBlock* check_smi_only_elements = graph()->CreateBasicBlock();
HBasicBlock* join = graph()->CreateBasicBlock();
HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(value);
smicheck->SetSuccessorAt(0, store_fast_edgesplit1);
smicheck->SetSuccessorAt(1, check_smi_only_elements);
current_block()->Finish(smicheck);
store_fast_edgesplit1->Finish(new(zone()) HGoto(store_fast));
set_current_block(check_smi_only_elements);
HCompareConstantEqAndBranch* smi_elements_check =
new(zone()) HCompareConstantEqAndBranch(elements_kind,
FAST_SMI_ONLY_ELEMENTS,
Token::EQ_STRICT);
smi_elements_check->SetSuccessorAt(0, store_generic);
smi_elements_check->SetSuccessorAt(1, store_fast_edgesplit2);
current_block()->Finish(smi_elements_check);
store_fast_edgesplit2->Finish(new(zone()) HGoto(store_fast));
set_current_block(store_fast);
AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value)); AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
store_fast->Goto(join);
set_current_block(store_generic);
AddInstruction(BuildStoreKeyedGeneric(literal, key, value));
store_generic->Goto(join);
join->SetJoinId(expr->id());
set_current_block(join);
AddSimulate(expr->GetIdForElement(i)); AddSimulate(expr->GetIdForElement(i));
} }
return ast_context()->ReturnValue(Pop()); return ast_context()->ReturnValue(Pop());
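The block removed here is the FAST_SMI_ONLY_ELEMENTS handling that 3.6.6 added for array literals: a smi can always be written with the fast store, but a non-smi stored into smi-only backing must go through the generic (transitioning) keyed store. The restored code always emits the fast store. A hedged sketch of the removed decision:

enum LiteralElementsKind { kFastSmiOnlyElements, kFastElements };

// True when HStoreKeyedFastElement is safe; false when the generic keyed
// store (which can transition the backing store) is required. Illustrative only.
static bool CanUseFastLiteralStore(bool value_is_smi, LiteralElementsKind kind) {
  return value_is_smi || kind != kFastSmiOnlyElements;
}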
@ -3626,10 +3561,10 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
LookupResult lookup; LookupResult lookup;
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true); GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) { if (type == kUseCell) {
bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
Handle<GlobalObject> global(info()->global_object()); Handle<GlobalObject> global(info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup)); Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
HInstruction* instr = HInstruction* instr = new(zone()) HStoreGlobalCell(value, cell, check_hole);
new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
instr->set_position(position); instr->set_position(position);
AddInstruction(instr); AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(ast_id); if (instr->HasSideEffects()) AddSimulate(ast_id);
@ -3993,7 +3928,6 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS:
break; break;
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
@ -4010,30 +3944,6 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
} }
HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
HValue* checked_key,
HValue* val,
ElementsKind elements_kind,
bool is_store) {
if (is_store) {
ASSERT(val != NULL);
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
return new(zone()) HStoreKeyedFastDoubleElement(
elements, checked_key, val);
} else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
return new(zone()) HStoreKeyedFastElement(
elements, checked_key, val, elements_kind);
}
}
// It's an element load (!is_store).
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
} else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
return new(zone()) HLoadKeyedFastElement(elements, checked_key);
}
}
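The helper deleted above existed only to centralize a two-way dispatch that the restored code spells out at each call site below: double-backed elements use the *FastDoubleElement instructions, all other fast kinds use the plain *FastElement ones. A simplified sketch of that dispatch, with strings standing in for the hydrogen instruction classes:

#include <string>

enum FastKind { kFastTagged, kFastSmiOnly, kFastDouble };

static std::string PickKeyedInstruction(FastKind kind, bool is_store) {
  const bool use_double = (kind == kFastDouble);
  if (is_store) {
    return use_double ? "HStoreKeyedFastDoubleElement" : "HStoreKeyedFastElement";
  }
  return use_double ? "HLoadKeyedFastDoubleElement" : "HLoadKeyedFastElement";
}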
HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object, HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
HValue* key, HValue* key,
HValue* val, HValue* val,
@ -4041,20 +3951,17 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
bool is_store) { bool is_store) {
ASSERT(expr->IsMonomorphic()); ASSERT(expr->IsMonomorphic());
Handle<Map> map = expr->GetMonomorphicReceiverType(); Handle<Map> map = expr->GetMonomorphicReceiverType();
AddInstruction(new(zone()) HCheckNonSmi(object)); if (!map->has_fast_elements() &&
HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map)); !map->has_fast_double_elements() &&
bool fast_smi_only_elements = map->has_fast_smi_only_elements();
bool fast_elements = map->has_fast_elements();
bool fast_double_elements = map->has_fast_double_elements();
if (!fast_smi_only_elements &&
!fast_elements &&
!fast_double_elements &&
!map->has_external_array_elements()) { !map->has_external_array_elements()) {
return is_store ? BuildStoreKeyedGeneric(object, key, val) return is_store ? BuildStoreKeyedGeneric(object, key, val)
: BuildLoadKeyedGeneric(object, key); : BuildLoadKeyedGeneric(object, key);
} }
AddInstruction(new(zone()) HCheckNonSmi(object));
HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object)); HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
if (is_store && (fast_elements || fast_smi_only_elements)) { bool fast_double_elements = map->has_fast_double_elements();
if (is_store && map->has_fast_elements()) {
AddInstruction(new(zone()) HCheckMap( AddInstruction(new(zone()) HCheckMap(
elements, isolate()->factory()->fixed_array_map())); elements, isolate()->factory()->fixed_array_map()));
} }
@ -4069,15 +3976,28 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
return BuildExternalArrayElementAccess(external_elements, checked_key, return BuildExternalArrayElementAccess(external_elements, checked_key,
val, map->elements_kind(), is_store); val, map->elements_kind(), is_store);
} }
ASSERT(fast_smi_only_elements || fast_elements || fast_double_elements); ASSERT(map->has_fast_elements() || fast_double_elements);
if (map->instance_type() == JS_ARRAY_TYPE) { if (map->instance_type() == JS_ARRAY_TYPE) {
length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck)); length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
} else { } else {
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements)); length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
} }
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length)); checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
return BuildFastElementAccess(elements, checked_key, val, if (is_store) {
map->elements_kind(), is_store); if (fast_double_elements) {
return new(zone()) HStoreKeyedFastDoubleElement(elements,
checked_key,
val);
} else {
return new(zone()) HStoreKeyedFastElement(elements, checked_key, val);
}
} else {
if (fast_double_elements) {
return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
} else {
return new(zone()) HLoadKeyedFastElement(elements, checked_key);
}
}
} }
@ -4119,20 +4039,14 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
HLoadExternalArrayPointer* external_elements = NULL; HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL; HInstruction* checked_key = NULL;
// Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, // FAST_ELEMENTS is assumed to be the first case.
// FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external STATIC_ASSERT(FAST_ELEMENTS == 0);
// arrays.
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND; for (ElementsKind elements_kind = FAST_ELEMENTS;
elements_kind <= LAST_ELEMENTS_KIND; elements_kind <= LAST_ELEMENTS_KIND;
elements_kind = ElementsKind(elements_kind + 1)) { elements_kind = ElementsKind(elements_kind + 1)) {
// After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS, // After having handled FAST_ELEMENTS and DICTIONARY_ELEMENTS, we
// FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code // need to add some code that's executed for all external array cases.
// that's executed for all external array cases.
STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND == STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
LAST_ELEMENTS_KIND); LAST_ELEMENTS_KIND);
if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
@ -4154,25 +4068,15 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_true); set_current_block(if_true);
HInstruction* access; HInstruction* access;
if (elements_kind == FAST_SMI_ONLY_ELEMENTS || if (elements_kind == FAST_ELEMENTS ||
elements_kind == FAST_ELEMENTS ||
elements_kind == FAST_DOUBLE_ELEMENTS) { elements_kind == FAST_DOUBLE_ELEMENTS) {
if (is_store && elements_kind == FAST_SMI_ONLY_ELEMENTS) { bool fast_double_elements =
AddInstruction(new(zone()) HCheckSmi(val)); elements_kind == FAST_DOUBLE_ELEMENTS;
} if (is_store && elements_kind == FAST_ELEMENTS) {
if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
AddInstruction(new(zone()) HCheckMap( AddInstruction(new(zone()) HCheckMap(
elements, isolate()->factory()->fixed_array_map(), elements, isolate()->factory()->fixed_array_map(),
elements_kind_branch)); elements_kind_branch));
} }
// TODO(jkummerow): The need for these two blocks could be avoided
// in one of two ways:
// (1) Introduce ElementsKinds for JSArrays that are distinct from
// those for fast objects.
// (2) Put the common instructions into a third "join" block. This
// requires additional AST IDs that we can deopt to from inside
// that join block. They must be added to the Property class (when
// it's a keyed property) and registered in the full codegen.
HBasicBlock* if_jsarray = graph()->CreateBasicBlock(); HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
HBasicBlock* if_fastobject = graph()->CreateBasicBlock(); HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
HHasInstanceTypeAndBranch* typecheck = HHasInstanceTypeAndBranch* typecheck =
@ -4182,15 +4086,29 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
current_block()->Finish(typecheck); current_block()->Finish(typecheck);
set_current_block(if_jsarray); set_current_block(if_jsarray);
HInstruction* length; HInstruction* length = new(zone()) HJSArrayLength(object, typecheck);
length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck)); AddInstruction(length);
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length)); checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
access = AddInstruction(BuildFastElementAccess( if (is_store) {
elements, checked_key, val, elements_kind, is_store)); if (fast_double_elements) {
if (!is_store) { access = AddInstruction(
new(zone()) HStoreKeyedFastDoubleElement(elements,
checked_key,
val));
} else {
access = AddInstruction(
new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
}
} else {
if (fast_double_elements) {
access = AddInstruction(
new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
} else {
access = AddInstruction(
new(zone()) HLoadKeyedFastElement(elements, checked_key));
}
Push(access); Push(access);
} }
*has_side_effects |= access->HasSideEffects(); *has_side_effects |= access->HasSideEffects();
if (position != -1) { if (position != -1) {
access->set_position(position); access->set_position(position);
@ -4200,8 +4118,25 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_fastobject); set_current_block(if_fastobject);
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements)); length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length)); checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
access = AddInstruction(BuildFastElementAccess( if (is_store) {
elements, checked_key, val, elements_kind, is_store)); if (fast_double_elements) {
access = AddInstruction(
new(zone()) HStoreKeyedFastDoubleElement(elements,
checked_key,
val));
} else {
access = AddInstruction(
new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
}
} else {
if (fast_double_elements) {
access = AddInstruction(
new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
} else {
access = AddInstruction(
new(zone()) HLoadKeyedFastElement(elements, checked_key));
}
}
} else if (elements_kind == DICTIONARY_ELEMENTS) { } else if (elements_kind == DICTIONARY_ELEMENTS) {
if (is_store) { if (is_store) {
access = AddInstruction(BuildStoreKeyedGeneric(object, key, val)); access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
@ -4539,25 +4474,20 @@ bool HGraphBuilder::TryInline(Call* expr) {
return false; return false;
} }
// No context change required.
CompilationInfo* outer_info = info(); CompilationInfo* outer_info = info();
#if !defined(V8_TARGET_ARCH_IA32)
// Target must be able to use caller's context.
if (target->context() != outer_info->closure()->context() || if (target->context() != outer_info->closure()->context() ||
outer_info->scope()->contains_with() || outer_info->scope()->contains_with() ||
outer_info->scope()->num_heap_slots() > 0) { outer_info->scope()->num_heap_slots() > 0) {
TraceInline(target, caller, "target requires context change"); TraceInline(target, caller, "target requires context change");
return false; return false;
} }
#endif
// Don't inline deeper than kMaxInliningLevels calls. // Don't inline deeper than kMaxInliningLevels calls.
HEnvironment* env = environment(); HEnvironment* env = environment();
int current_level = 1; int current_level = 1;
while (env->outer() != NULL) { while (env->outer() != NULL) {
if (current_level == (FLAG_limit_inlining if (current_level == Compiler::kMaxInliningLevels) {
? Compiler::kMaxInliningLevels
: 2 * Compiler::kMaxInliningLevels)) {
TraceInline(target, caller, "inline depth limit reached"); TraceInline(target, caller, "inline depth limit reached");
return false; return false;
} }
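The loop above enforces the inlining depth limit by walking the chain of enclosing environments; the 3.6.6 side additionally doubles the limit unless --limit_inlining is passed. A minimal sketch of the guard, using a toy Env type:

struct Env {
  Env* outer;
};

// Returns true when inlining at this point would exceed max_levels nested
// inlined calls, mirroring the bail-out above. Illustrative only.
static bool ExceedsInliningDepth(const Env* env, int max_levels) {
  int current_level = 1;
  for (const Env* e = env; e->outer != nullptr; e = e->outer) {
    if (current_level == max_levels) return true;
    ++current_level;
  }
  return false;
}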
@ -4663,8 +4593,7 @@ bool HGraphBuilder::TryInline(Call* expr) {
ASSERT(target_shared->has_deoptimization_support()); ASSERT(target_shared->has_deoptimization_support());
TypeFeedbackOracle target_oracle( TypeFeedbackOracle target_oracle(
Handle<Code>(target_shared->code()), Handle<Code>(target_shared->code()),
Handle<Context>(target->context()->global_context()), Handle<Context>(target->context()->global_context()));
isolate());
FunctionState target_state(this, &target_info, &target_oracle); FunctionState target_state(this, &target_info, &target_oracle);
HConstant* undefined = graph()->GetConstantUndefined(); HConstant* undefined = graph()->GetConstantUndefined();
@ -4673,17 +4602,6 @@ bool HGraphBuilder::TryInline(Call* expr) {
function, function,
undefined, undefined,
call_kind); call_kind);
#ifdef V8_TARGET_ARCH_IA32
// IA32 only, overwrite the caller's context in the deoptimization
// environment with the correct one.
//
// TODO(kmillikin): implement the same inlining on other platforms so we
// can remove the unsightly ifdefs in this function.
HConstant* context = new HConstant(Handle<Context>(target->context()),
Representation::Tagged());
AddInstruction(context);
inner_env->BindContext(context);
#endif
HBasicBlock* body_entry = CreateBasicBlock(inner_env); HBasicBlock* body_entry = CreateBasicBlock(inner_env);
current_block()->Goto(body_entry); current_block()->Goto(body_entry);
body_entry->SetJoinId(expr->ReturnId()); body_entry->SetJoinId(expr->ReturnId());
@ -5004,8 +4922,8 @@ void HGraphBuilder::VisitCall(Call* expr) {
} }
} else { } else {
expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
VariableProxy* proxy = expr->expression()->AsVariableProxy(); VariableProxy* proxy = expr->expression()->AsVariableProxy();
// FIXME.
bool global_call = proxy != NULL && proxy->var()->IsUnallocated(); bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
if (global_call) { if (global_call) {
@ -5057,46 +4975,6 @@ void HGraphBuilder::VisitCall(Call* expr) {
Drop(argument_count); Drop(argument_count);
} }
} else if (expr->IsMonomorphic()) {
// The function is on the stack in the unoptimized code during
// evaluation of the arguments.
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
HValue* context = environment()->LookupContext();
HGlobalObject* global = new(zone()) HGlobalObject(context);
HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
AddInstruction(global);
PushAndAdd(receiver);
CHECK_ALIVE(VisitExpressions(expr->arguments()));
AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
if (TryInline(expr)) {
// The function is lingering in the deoptimization environment.
// Handle it by case analysis on the AST context.
if (ast_context()->IsEffect()) {
Drop(1);
} else if (ast_context()->IsValue()) {
HValue* result = Pop();
Drop(1);
Push(result);
} else if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
if (context->if_true()->HasPredecessor()) {
context->if_true()->last_environment()->Drop(1);
}
if (context->if_false()->HasPredecessor()) {
context->if_true()->last_environment()->Drop(1);
}
} else {
UNREACHABLE();
}
return;
} else {
call = PreProcessCall(new(zone()) HInvokeFunction(context,
function,
argument_count));
Drop(1); // The function.
}
} else { } else {
CHECK_ALIVE(VisitArgument(expr->expression())); CHECK_ALIVE(VisitArgument(expr->expression()));
HValue* context = environment()->LookupContext(); HValue* context = environment()->LookupContext();
@ -5790,36 +5668,26 @@ Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
} }
void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr, void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* compare_expr,
Expression* sub_expr, Expression* expr,
Handle<String> check) { Handle<String> check) {
CHECK_ALIVE(VisitForTypeOf(sub_expr)); CHECK_ALIVE(VisitForTypeOf(expr));
HValue* value = Pop(); HValue* expr_value = Pop();
HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check); HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(expr_value, check);
instr->set_position(expr->position()); instr->set_position(compare_expr->position());
return ast_context()->ReturnControl(instr, expr->id()); return ast_context()->ReturnControl(instr, compare_expr->id());
} }
bool HGraphBuilder::TryLiteralCompare(CompareOperation* expr) { void HGraphBuilder::HandleLiteralCompareUndefined(
Expression *sub_expr; CompareOperation* compare_expr, Expression* expr) {
Handle<String> check; CHECK_ALIVE(VisitForValue(expr));
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) { HValue* lhs = Pop();
HandleLiteralCompareTypeof(expr, sub_expr, check); HValue* rhs = graph()->GetConstantUndefined();
return true; HCompareObjectEqAndBranch* instr =
} new(zone()) HCompareObjectEqAndBranch(lhs, rhs);
instr->set_position(compare_expr->position());
if (expr->IsLiteralCompareUndefined(&sub_expr)) { return ast_context()->ReturnControl(instr, compare_expr->id());
HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
return true;
}
if (expr->IsLiteralCompareNull(&sub_expr)) {
HandleLiteralCompareNil(expr, sub_expr, kNullValue);
return true;
}
return false;
} }
@ -5841,7 +5709,17 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
} }
// Check for special cases that compare against literals. // Check for special cases that compare against literals.
if (TryLiteralCompare(expr)) return; Expression *sub_expr;
Handle<String> check;
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
HandleLiteralCompareTypeof(expr, sub_expr, check);
return;
}
if (expr->IsLiteralCompareUndefined(&sub_expr)) {
HandleLiteralCompareUndefined(expr, sub_expr);
return;
}
TypeInfo type_info = oracle()->CompareType(expr); TypeInfo type_info = oracle()->CompareType(expr);
// Check if this expression was ever executed according to type feedback. // Check if this expression was ever executed according to type feedback.
@ -5946,18 +5824,14 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
} }
void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr, void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
Expression* sub_expr,
NilValue nil) {
ASSERT(!HasStackOverflow()); ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL); ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor()); ASSERT(current_block()->HasPredecessor());
CHECK_ALIVE(VisitForValue(sub_expr)); CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop(); HValue* value = Pop();
EqualityKind kind = HIsNullAndBranch* instr =
expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality; new(zone()) HIsNullAndBranch(value, expr->is_strict());
HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil);
instr->set_position(expr->position());
return ast_context()->ReturnControl(instr, expr->id()); return ast_context()->ReturnControl(instr, expr->id());
} }
@ -6040,7 +5914,9 @@ void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop(); HValue* value = Pop();
HHasInstanceTypeAndBranch* result = HHasInstanceTypeAndBranch* result =
new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE); new(zone()) HHasInstanceTypeAndBranch(value,
JS_FUNCTION_TYPE,
JS_FUNCTION_PROXY_TYPE);
return ast_context()->ReturnControl(result, call->id()); return ast_context()->ReturnControl(result, call->id());
} }
@ -6940,7 +6816,7 @@ void HPhase::End() const {
} }
#ifdef DEBUG #ifdef DEBUG
if (graph_ != NULL) graph_->Verify(false); // No full verify. if (graph_ != NULL) graph_->Verify();
if (allocator_ != NULL) allocator_->Verify(); if (allocator_ != NULL) allocator_->Verify();
#endif #endif
} }

27
deps/v8/src/hydrogen.h

@ -243,13 +243,11 @@ class HGraph: public ZoneObject {
// Returns false if there are phi-uses of the arguments-object // Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler. // which are not supported by the optimizing compiler.
bool CheckArgumentsPhiUses(); bool CheckPhis();
// Returns false if there are phi-uses of an uninitialized const // Returns false if there are phi-uses of hole values coming
// which are not supported by the optimizing compiler. // from uninitialized consts.
bool CheckConstPhiUses(); bool CollectPhis();
void CollectPhis();
Handle<Code> Compile(CompilationInfo* info); Handle<Code> Compile(CompilationInfo* info);
@ -285,7 +283,7 @@ class HGraph: public ZoneObject {
} }
#ifdef DEBUG #ifdef DEBUG
void Verify(bool do_full_verify) const; void Verify() const;
#endif #endif
private: private:
@ -912,13 +910,11 @@ class HGraphBuilder: public AstVisitor {
HValue* receiver, HValue* receiver,
SmallMapList* types, SmallMapList* types,
Handle<String> name); Handle<String> name);
bool TryLiteralCompare(CompareOperation* expr); void HandleLiteralCompareTypeof(CompareOperation* compare_expr,
void HandleLiteralCompareTypeof(CompareOperation* expr, Expression* expr,
Expression* sub_expr,
Handle<String> check); Handle<String> check);
void HandleLiteralCompareNil(CompareOperation* expr, void HandleLiteralCompareUndefined(CompareOperation* compare_expr,
Expression* sub_expr, Expression* expr);
NilValue nil);
HStringCharCodeAt* BuildStringCharCodeAt(HValue* context, HStringCharCodeAt* BuildStringCharCodeAt(HValue* context,
HValue* string, HValue* string,
@ -942,11 +938,6 @@ class HGraphBuilder: public AstVisitor {
HValue* val, HValue* val,
ElementsKind elements_kind, ElementsKind elements_kind,
bool is_store); bool is_store);
HInstruction* BuildFastElementAccess(HValue* elements,
HValue* checked_key,
HValue* val,
ElementsKind elements_kind,
bool is_store);
HInstruction* BuildMonomorphicElementAccess(HValue* object, HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key, HValue* key,

26
deps/v8/src/ia32/assembler-ia32-inl.h

@ -89,13 +89,8 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target) { void RelocInfo::set_target_address(Address target) {
Assembler::set_target_address_at(pc_, target);
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
if (host() != NULL && IsCodeTarget(rmode_)) { Assembler::set_target_address_at(pc_, target);
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
} }
@ -121,10 +116,6 @@ void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target; Memory::Object_at(pc_) = target;
CPU::FlushICache(pc_, sizeof(Address)); CPU::FlushICache(pc_, sizeof(Address));
if (host() != NULL && target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), &Memory::Object_at(pc_), HeapObject::cast(target));
}
} }
@ -156,12 +147,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset; Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address; Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address)); CPU::FlushICache(pc_, sizeof(Address));
if (host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
// evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
host(), NULL, cell);
}
} }
@ -176,11 +161,6 @@ void RelocInfo::set_call_address(Address target) {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Assembler::set_target_address_at(pc_ + 1, target); Assembler::set_target_address_at(pc_ + 1, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
} }
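The calls removed in this file are the incremental-marking write barriers that 3.6.6 added to the relocation setters: when marking is in progress and a pointer is patched into already-visited code, the new target has to be re-exposed to the marker or it can be missed. A hedged sketch of that tri-color invariant (not V8's implementation; names are illustrative):

#include <vector>

enum Color { kWhite, kGrey, kBlack };

struct GcObject {
  Color color;
};

// Post-write barrier: a black (fully scanned) host now points at a white
// (unvisited) value, so push the value back onto the marking worklist.
static void PostWriteBarrier(GcObject* host, GcObject* value, bool marking_active,
                             std::vector<GcObject*>* marking_worklist) {
  if (!marking_active) return;
  if (host->color == kBlack && value->color == kWhite) {
    value->color = kGrey;
    marking_worklist->push_back(value);
  }
}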
@ -214,7 +194,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
void RelocInfo::Visit(ObjectVisitor* visitor) { void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode(); RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) { if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), target_object_address()); visitor->VisitPointer(target_object_address());
CPU::FlushICache(pc_, sizeof(Address)); CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) { } else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this); visitor->VisitCodeTarget(this);
@ -242,7 +222,7 @@ template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) { void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode(); RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) { if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, host(), target_object_address()); StaticVisitor::VisitPointer(heap, target_object_address());
CPU::FlushICache(pc_, sizeof(Address)); CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) { } else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this); StaticVisitor::VisitCodeTarget(heap, this);

87
deps/v8/src/ia32/assembler-ia32.cc

@ -55,8 +55,6 @@ uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0; uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
// The Probe method needs executable memory, so it uses Heap::CreateCode.
// Allocation failure is silent and leads to safe default.
void CpuFeatures::Probe() { void CpuFeatures::Probe() {
ASSERT(!initialized_); ASSERT(!initialized_);
ASSERT(supported_ == 0); ASSERT(supported_ == 0);
@ -88,23 +86,23 @@ void CpuFeatures::Probe() {
__ pushfd(); __ pushfd();
__ push(ecx); __ push(ecx);
__ push(ebx); __ push(ebx);
__ mov(ebp, esp); __ mov(ebp, Operand(esp));
// If we can modify bit 21 of the EFLAGS register, then CPUID is supported. // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
__ pushfd(); __ pushfd();
__ pop(eax); __ pop(eax);
__ mov(edx, eax); __ mov(edx, Operand(eax));
__ xor_(eax, 0x200000); // Flip bit 21. __ xor_(eax, 0x200000); // Flip bit 21.
__ push(eax); __ push(eax);
__ popfd(); __ popfd();
__ pushfd(); __ pushfd();
__ pop(eax); __ pop(eax);
__ xor_(eax, edx); // Different if CPUID is supported. __ xor_(eax, Operand(edx)); // Different if CPUID is supported.
__ j(not_zero, &cpuid); __ j(not_zero, &cpuid);
// CPUID not supported. Clear the supported features in edx:eax. // CPUID not supported. Clear the supported features in edx:eax.
__ xor_(eax, eax); __ xor_(eax, Operand(eax));
__ xor_(edx, edx); __ xor_(edx, Operand(edx));
__ jmp(&done); __ jmp(&done);
// Invoke CPUID with 1 in eax to get feature information in // Invoke CPUID with 1 in eax to get feature information in
@ -120,13 +118,13 @@ void CpuFeatures::Probe() {
// Move the result from ecx:edx to edx:eax and make sure to mark the // Move the result from ecx:edx to edx:eax and make sure to mark the
// CPUID feature as supported. // CPUID feature as supported.
__ mov(eax, edx); __ mov(eax, Operand(edx));
__ or_(eax, 1 << CPUID); __ or_(eax, 1 << CPUID);
__ mov(edx, ecx); __ mov(edx, Operand(ecx));
// Done. // Done.
__ bind(&done); __ bind(&done);
__ mov(esp, ebp); __ mov(esp, Operand(ebp));
__ pop(ebx); __ pop(ebx);
__ pop(ecx); __ pop(ecx);
__ popfd(); __ popfd();
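The probe above generates machine code at runtime because it must run before any CPU features can be assumed; it first checks that bit 21 of EFLAGS can be toggled (meaning CPUID exists) and then reads leaf 1. The same feature bits can be read with the GCC/Clang <cpuid.h> helper; the bit positions below are the standard leaf-1 flags (CMOV = EDX bit 15, SSE2 = EDX bit 26, SSE3 = ECX bit 0), shown here only as a cross-check:

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
    std::puts("CPUID leaf 1 not available");
    return 0;
  }
  std::printf("CMOV: %u\n", (edx >> 15) & 1u);
  std::printf("SSE2: %u\n", (edx >> 26) & 1u);
  std::printf("SSE3: %u\n", (ecx >> 0) & 1u);
  return 0;
}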
@ -288,18 +286,6 @@ bool Operand::is_reg(Register reg) const {
&& ((buf_[0] & 0x07) == reg.code()); // register codes match. && ((buf_[0] & 0x07) == reg.code()); // register codes match.
} }
bool Operand::is_reg_only() const {
return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only.
}
Register Operand::reg() const {
ASSERT(is_reg_only());
return Register::from_code(buf_[0] & 0x07);
}
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// Implementation of Assembler. // Implementation of Assembler.
@ -715,13 +701,6 @@ void Assembler::add(Register dst, const Operand& src) {
} }
void Assembler::add(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x01);
emit_operand(src, dst);
}
void Assembler::add(const Operand& dst, const Immediate& x) { void Assembler::add(const Operand& dst, const Immediate& x) {
ASSERT(reloc_info_writer.last_pc() != NULL); ASSERT(reloc_info_writer.last_pc() != NULL);
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
@ -762,29 +741,25 @@ void Assembler::and_(const Operand& dst, Register src) {
void Assembler::cmpb(const Operand& op, int8_t imm8) { void Assembler::cmpb(const Operand& op, int8_t imm8) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
if (op.is_reg(eax)) { EMIT(0x80);
EMIT(0x3C); emit_operand(edi, op); // edi == 7
} else {
EMIT(0x80);
emit_operand(edi, op); // edi == 7
}
EMIT(imm8); EMIT(imm8);
} }
void Assembler::cmpb(const Operand& op, Register reg) { void Assembler::cmpb(const Operand& dst, Register src) {
ASSERT(reg.is_byte_register()); ASSERT(src.is_byte_register());
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
EMIT(0x38); EMIT(0x38);
emit_operand(reg, op); emit_operand(src, dst);
} }
void Assembler::cmpb(Register reg, const Operand& op) { void Assembler::cmpb(Register dst, const Operand& src) {
ASSERT(reg.is_byte_register()); ASSERT(dst.is_byte_register());
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
EMIT(0x3A); EMIT(0x3A);
emit_operand(reg, op); emit_operand(dst, src);
} }
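The is_reg(eax) special case that 3.6.6 added to cmpb() exists because x86 has a one-byte-shorter encoding when the destination is AL: cmp al, 0x07 assembles to 3C 07, while cmp bl, 0x07 needs the generic 0x80 /7 form and assembles to 80 FB 07 (ModRM 0xFB = mod 11, reg 7, r/m 3). The restored code always emits the generic form.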
@ -1094,6 +1069,18 @@ void Assembler::shr_cl(Register dst) {
} }
void Assembler::subb(const Operand& op, int8_t imm8) {
EnsureSpace ensure_space(this);
if (op.is_reg(eax)) {
EMIT(0x2c);
} else {
EMIT(0x80);
emit_operand(ebp, op); // ebp == 5
}
EMIT(imm8);
}
void Assembler::sub(const Operand& dst, const Immediate& x) { void Assembler::sub(const Operand& dst, const Immediate& x) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
emit_arith(5, dst, x); emit_arith(5, dst, x);
@ -1107,6 +1094,14 @@ void Assembler::sub(Register dst, const Operand& src) {
} }
void Assembler::subb(Register dst, const Operand& src) {
ASSERT(dst.code() < 4);
EnsureSpace ensure_space(this);
EMIT(0x2A);
emit_operand(dst, src);
}
void Assembler::sub(const Operand& dst, Register src) { void Assembler::sub(const Operand& dst, Register src) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
EMIT(0x29); EMIT(0x29);
@ -1163,10 +1158,6 @@ void Assembler::test(const Operand& op, const Immediate& imm) {
void Assembler::test_b(const Operand& op, uint8_t imm8) { void Assembler::test_b(const Operand& op, uint8_t imm8) {
if (op.is_reg_only() && op.reg().code() >= 4) {
test(op, Immediate(imm8));
return;
}
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
EMIT(0xF6); EMIT(0xF6);
emit_operand(eax, op); emit_operand(eax, op);
@ -1187,10 +1178,10 @@ void Assembler::xor_(Register dst, const Operand& src) {
} }
void Assembler::xor_(const Operand& dst, Register src) { void Assembler::xor_(const Operand& src, Register dst) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
EMIT(0x31); EMIT(0x31);
emit_operand(src, dst); emit_operand(dst, src);
} }
@ -2480,7 +2471,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
return; return;
} }
} }
RelocInfo rinfo(pc_, rmode, data, NULL); RelocInfo rinfo(pc_, rmode, data);
reloc_info_writer.Write(&rinfo); reloc_info_writer.Write(&rinfo);
} }
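For reference, the Operand::is_reg_only() / Operand::reg() helpers deleted above work by inspecting the first ModRM byte of the encoded operand: mod == 11 (the top two bits of 0xC0) means register-direct addressing, and the low three bits carry the register code. A minimal standalone sketch of that check, using assumed names rather than V8's actual types:

#include <cstdint>

// Sketch only. modrm is the first byte of an encoded ia32 operand, written
// with the middle 'reg' field still zero (it is filled in later by
// emit_operand), so mod == 11 implies the whole byte is 0xC0..0xC7.
inline bool IsRegisterOnlyOperand(uint8_t modrm) {
  return (modrm & 0xF8) == 0xC0;  // addressing mode is register-direct
}

inline int OperandRegisterCode(uint8_t modrm) {
  // Only meaningful when IsRegisterOnlyOperand(modrm) holds.
  return modrm & 0x07;            // low three bits hold the register code
}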

88
deps/v8/src/ia32/assembler-ia32.h

@ -75,8 +75,6 @@ struct Register {
static inline Register FromAllocationIndex(int index); static inline Register FromAllocationIndex(int index);
static Register from_code(int code) { static Register from_code(int code) {
ASSERT(code >= 0);
ASSERT(code < kNumRegisters);
Register r = { code }; Register r = { code };
return r; return r;
} }
@ -302,6 +300,9 @@ enum ScaleFactor {
class Operand BASE_EMBEDDED { class Operand BASE_EMBEDDED {
public: public:
// reg
INLINE(explicit Operand(Register reg));
// XMM reg // XMM reg
INLINE(explicit Operand(XMMRegister xmm_reg)); INLINE(explicit Operand(XMMRegister xmm_reg));
@ -346,16 +347,12 @@ class Operand BASE_EMBEDDED {
// Returns true if this Operand is a wrapper for the specified register. // Returns true if this Operand is a wrapper for the specified register.
bool is_reg(Register reg) const; bool is_reg(Register reg) const;
// Returns true if this Operand is a wrapper for one register.
bool is_reg_only() const;
// Asserts that this Operand is a wrapper for one register and returns the
// register.
Register reg() const;
private: private:
// reg byte buf_[6];
INLINE(explicit Operand(Register reg)); // The number of bytes in buf_.
unsigned int len_;
// Only valid if len_ > 4.
RelocInfo::Mode rmode_;
// Set the ModRM byte without an encoded 'reg' register. The // Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation. // register is encoded later as part of the emit_operand operation.
@ -365,15 +362,7 @@ class Operand BASE_EMBEDDED {
inline void set_disp8(int8_t disp); inline void set_disp8(int8_t disp);
inline void set_dispr(int32_t disp, RelocInfo::Mode rmode); inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
byte buf_[6];
// The number of bytes in buf_.
unsigned int len_;
// Only valid if len_ > 4.
RelocInfo::Mode rmode_;
friend class Assembler; friend class Assembler;
friend class MacroAssembler;
friend class LCodeGen;
}; };
@ -682,9 +671,7 @@ class Assembler : public AssemblerBase {
void leave(); void leave();
// Moves // Moves
void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
void mov_b(Register dst, const Operand& src); void mov_b(Register dst, const Operand& src);
void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
void mov_b(const Operand& dst, int8_t imm8); void mov_b(const Operand& dst, int8_t imm8);
void mov_b(const Operand& dst, Register src); void mov_b(const Operand& dst, Register src);
@ -700,24 +687,17 @@ class Assembler : public AssemblerBase {
void mov(const Operand& dst, Handle<Object> handle); void mov(const Operand& dst, Handle<Object> handle);
void mov(const Operand& dst, Register src); void mov(const Operand& dst, Register src);
void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
void movsx_b(Register dst, const Operand& src); void movsx_b(Register dst, const Operand& src);
void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
void movsx_w(Register dst, const Operand& src); void movsx_w(Register dst, const Operand& src);
void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
void movzx_b(Register dst, const Operand& src); void movzx_b(Register dst, const Operand& src);
void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
void movzx_w(Register dst, const Operand& src); void movzx_w(Register dst, const Operand& src);
// Conditional moves // Conditional moves
void cmov(Condition cc, Register dst, int32_t imm32); void cmov(Condition cc, Register dst, int32_t imm32);
void cmov(Condition cc, Register dst, Handle<Object> handle); void cmov(Condition cc, Register dst, Handle<Object> handle);
void cmov(Condition cc, Register dst, Register src) {
cmov(cc, dst, Operand(src));
}
void cmov(Condition cc, Register dst, const Operand& src); void cmov(Condition cc, Register dst, const Operand& src);
// Flag management. // Flag management.
@ -735,31 +715,24 @@ class Assembler : public AssemblerBase {
void adc(Register dst, int32_t imm32); void adc(Register dst, int32_t imm32);
void adc(Register dst, const Operand& src); void adc(Register dst, const Operand& src);
void add(Register dst, Register src) { add(dst, Operand(src)); }
void add(Register dst, const Operand& src); void add(Register dst, const Operand& src);
void add(const Operand& dst, Register src);
void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
void add(const Operand& dst, const Immediate& x); void add(const Operand& dst, const Immediate& x);
void and_(Register dst, int32_t imm32); void and_(Register dst, int32_t imm32);
void and_(Register dst, const Immediate& x); void and_(Register dst, const Immediate& x);
void and_(Register dst, Register src) { and_(dst, Operand(src)); }
void and_(Register dst, const Operand& src); void and_(Register dst, const Operand& src);
void and_(const Operand& dst, Register src); void and_(const Operand& src, Register dst);
void and_(const Operand& dst, const Immediate& x); void and_(const Operand& dst, const Immediate& x);
void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
void cmpb(const Operand& op, int8_t imm8); void cmpb(const Operand& op, int8_t imm8);
void cmpb(Register reg, const Operand& op); void cmpb(Register src, const Operand& dst);
void cmpb(const Operand& op, Register reg); void cmpb(const Operand& dst, Register src);
void cmpb_al(const Operand& op); void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op); void cmpw_ax(const Operand& op);
void cmpw(const Operand& op, Immediate imm16); void cmpw(const Operand& op, Immediate imm16);
void cmp(Register reg, int32_t imm32); void cmp(Register reg, int32_t imm32);
void cmp(Register reg, Handle<Object> handle); void cmp(Register reg, Handle<Object> handle);
void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
void cmp(Register reg, const Operand& op); void cmp(Register reg, const Operand& op);
void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
void cmp(const Operand& op, const Immediate& imm); void cmp(const Operand& op, const Immediate& imm);
void cmp(const Operand& op, Handle<Object> handle); void cmp(const Operand& op, Handle<Object> handle);
@ -775,7 +748,6 @@ class Assembler : public AssemblerBase {
// Signed multiply instructions. // Signed multiply instructions.
void imul(Register src); // edx:eax = eax * src. void imul(Register src); // edx:eax = eax * src.
void imul(Register dst, Register src) { imul(dst, Operand(src)); }
void imul(Register dst, const Operand& src); // dst = dst * src. void imul(Register dst, const Operand& src); // dst = dst * src.
void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32. void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
@ -792,10 +764,8 @@ class Assembler : public AssemblerBase {
void not_(Register dst); void not_(Register dst);
void or_(Register dst, int32_t imm32); void or_(Register dst, int32_t imm32);
void or_(Register dst, Register src) { or_(dst, Operand(src)); }
void or_(Register dst, const Operand& src); void or_(Register dst, const Operand& src);
void or_(const Operand& dst, Register src); void or_(const Operand& dst, Register src);
void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
void or_(const Operand& dst, const Immediate& x); void or_(const Operand& dst, const Immediate& x);
void rcl(Register dst, uint8_t imm8); void rcl(Register dst, uint8_t imm8);
@ -806,42 +776,35 @@ class Assembler : public AssemblerBase {
void sbb(Register dst, const Operand& src); void sbb(Register dst, const Operand& src);
void shld(Register dst, Register src) { shld(dst, Operand(src)); }
void shld(Register dst, const Operand& src); void shld(Register dst, const Operand& src);
void shl(Register dst, uint8_t imm8); void shl(Register dst, uint8_t imm8);
void shl_cl(Register dst); void shl_cl(Register dst);
void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
void shrd(Register dst, const Operand& src); void shrd(Register dst, const Operand& src);
void shr(Register dst, uint8_t imm8); void shr(Register dst, uint8_t imm8);
void shr_cl(Register dst); void shr_cl(Register dst);
void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); } void subb(const Operand& dst, int8_t imm8);
void subb(Register dst, const Operand& src);
void sub(const Operand& dst, const Immediate& x); void sub(const Operand& dst, const Immediate& x);
void sub(Register dst, Register src) { sub(dst, Operand(src)); }
void sub(Register dst, const Operand& src); void sub(Register dst, const Operand& src);
void sub(const Operand& dst, Register src); void sub(const Operand& dst, Register src);
void test(Register reg, const Immediate& imm); void test(Register reg, const Immediate& imm);
void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
void test(Register reg, const Operand& op); void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op); void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm); void test(const Operand& op, const Immediate& imm);
void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
void test_b(const Operand& op, uint8_t imm8); void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32); void xor_(Register dst, int32_t imm32);
void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
void xor_(Register dst, const Operand& src); void xor_(Register dst, const Operand& src);
void xor_(const Operand& dst, Register src); void xor_(const Operand& src, Register dst);
void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
void xor_(const Operand& dst, const Immediate& x); void xor_(const Operand& dst, const Immediate& x);
// Bit operations. // Bit operations.
void bt(const Operand& dst, Register src); void bt(const Operand& dst, Register src);
void bts(Register dst, Register src) { bts(Operand(dst), src); }
void bts(const Operand& dst, Register src); void bts(const Operand& dst, Register src);
// Miscellaneous // Miscellaneous
@ -872,7 +835,6 @@ class Assembler : public AssemblerBase {
void call(Label* L); void call(Label* L);
void call(byte* entry, RelocInfo::Mode rmode); void call(byte* entry, RelocInfo::Mode rmode);
int CallSize(const Operand& adr); int CallSize(const Operand& adr);
void call(Register reg) { call(Operand(reg)); }
void call(const Operand& adr); void call(const Operand& adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode); int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code, void call(Handle<Code> code,
@ -883,7 +845,6 @@ class Assembler : public AssemblerBase {
// unconditional jump to L // unconditional jump to L
void jmp(Label* L, Label::Distance distance = Label::kFar); void jmp(Label* L, Label::Distance distance = Label::kFar);
void jmp(byte* entry, RelocInfo::Mode rmode); void jmp(byte* entry, RelocInfo::Mode rmode);
void jmp(Register reg) { jmp(Operand(reg)); }
void jmp(const Operand& adr); void jmp(const Operand& adr);
void jmp(Handle<Code> code, RelocInfo::Mode rmode); void jmp(Handle<Code> code, RelocInfo::Mode rmode);
@ -968,7 +929,6 @@ class Assembler : public AssemblerBase {
void cvttss2si(Register dst, const Operand& src); void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src); void cvttsd2si(Register dst, const Operand& src);
void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
void cvtsi2sd(XMMRegister dst, const Operand& src); void cvtsi2sd(XMMRegister dst, const Operand& src);
void cvtss2sd(XMMRegister dst, XMMRegister src); void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtsd2ss(XMMRegister dst, XMMRegister src); void cvtsd2ss(XMMRegister dst, XMMRegister src);
@ -1009,14 +969,12 @@ class Assembler : public AssemblerBase {
void movdbl(XMMRegister dst, const Operand& src); void movdbl(XMMRegister dst, const Operand& src);
void movdbl(const Operand& dst, XMMRegister src); void movdbl(const Operand& dst, XMMRegister src);
void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
void movd(XMMRegister dst, const Operand& src); void movd(XMMRegister dst, const Operand& src);
void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); } void movd(const Operand& src, XMMRegister dst);
void movd(const Operand& dst, XMMRegister src);
void movsd(XMMRegister dst, XMMRegister src); void movsd(XMMRegister dst, XMMRegister src);
void movss(XMMRegister dst, const Operand& src); void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src); void movss(const Operand& src, XMMRegister dst);
void movss(XMMRegister dst, XMMRegister src); void movss(XMMRegister dst, XMMRegister src);
void pand(XMMRegister dst, XMMRegister src); void pand(XMMRegister dst, XMMRegister src);
@ -1029,17 +987,11 @@ class Assembler : public AssemblerBase {
void psrlq(XMMRegister reg, int8_t shift); void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src); void psrlq(XMMRegister dst, XMMRegister src);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle); void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
void pextrd(Register dst, XMMRegister src, int8_t offset) {
pextrd(Operand(dst), src, offset);
}
void pextrd(const Operand& dst, XMMRegister src, int8_t offset); void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
void pinsrd(XMMRegister dst, Register src, int8_t offset) {
pinsrd(dst, Operand(src), offset);
}
void pinsrd(XMMRegister dst, const Operand& src, int8_t offset); void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
// Parallel XMM operations. // Parallel XMM operations.
void movntdqa(XMMRegister dst, const Operand& src); void movntdqa(XMMRegister src, const Operand& dst);
void movntdq(const Operand& dst, XMMRegister src); void movntdq(const Operand& dst, XMMRegister src);
// Prefetch src position into cache level. // Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
@ -1093,9 +1045,6 @@ class Assembler : public AssemblerBase {
static const int kMaximalBufferSize = 512*MB; static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB; static const int kMinimalBufferSize = 4*KB;
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
protected: protected:
bool emit_debug_code() const { return emit_debug_code_; } bool emit_debug_code() const { return emit_debug_code_; }
@ -1108,8 +1057,9 @@ class Assembler : public AssemblerBase {
byte* addr_at(int pos) { return buffer_ + pos; } byte* addr_at(int pos) { return buffer_ + pos; }
private: private:
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
uint32_t long_at(int pos) { uint32_t long_at(int pos) {
return *reinterpret_cast<uint32_t*>(addr_at(pos)); return *reinterpret_cast<uint32_t*>(addr_at(pos));
} }
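The header changes above toggle between two calling styles: the newer assembler (left column) exposes thin Register overloads that forward to the Operand form, while the reverted version makes call sites wrap registers in Operand() explicitly. A minimal sketch of the forwarding pattern, with assumed types rather than V8's real declarations:

#include <cstdint>

// Sketch only: the "convenience overload forwards to the Operand form"
// pattern that the pre-revert header provided and the revert removes.
struct Register { int code; };

struct Operand {
  explicit Operand(Register r) : modrm_(0xC0 | (r.code & 0x07)) {}  // register-direct
  uint8_t modrm_;
};

struct Assembler {
  void add(Register dst, const Operand& src) { /* emit 0x03 /r ... */ }
  // With this overload, call sites may write add(edx, ecx); without it they
  // must spell out add(edx, Operand(ecx)), as in the reverted sources.
  void add(Register dst, Register src) { add(dst, Operand(src)); }
};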

1031
deps/v8/src/ia32/builtins-ia32.cc

File diff suppressed because it is too large

1112
deps/v8/src/ia32/code-stubs-ia32.cc

File diff suppressed because it is too large

291
deps/v8/src/ia32/code-stubs-ia32.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved. // Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -60,25 +60,6 @@ class TranscendentalCacheStub: public CodeStub {
}; };
class StoreBufferOverflowStub: public CodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated() { return true; }
static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
private:
SaveFPRegsMode save_doubles_;
Major MajorKey() { return StoreBufferOverflow; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
class UnaryOpStub: public CodeStub { class UnaryOpStub: public CodeStub {
public: public:
UnaryOpStub(Token::Value op, UnaryOpStub(Token::Value op,
@ -437,8 +418,6 @@ class StringDictionaryLookupStub: public CodeStub {
Register r0, Register r0,
Register r1); Register r1);
virtual bool SometimesSetsUpAFrame() { return false; }
private: private:
static const int kInlinedProbes = 4; static const int kInlinedProbes = 4;
static const int kTotalProbes = 20; static const int kTotalProbes = 20;
@ -451,7 +430,7 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize + StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize; StringDictionary::kElementsStartIndex * kPointerSize;
Major MajorKey() { return StringDictionaryLookup; } Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() { int MinorKey() {
return DictionaryBits::encode(dictionary_.code()) | return DictionaryBits::encode(dictionary_.code()) |
@ -472,272 +451,6 @@ class StringDictionaryLookupStub: public CodeStub {
}; };
class RecordWriteStub: public CodeStub {
public:
RecordWriteStub(Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
}
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
INCREMENTAL_COMPACTION
};
virtual bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
static Mode GetMode(Code* stub) {
byte first_instruction = stub->instruction_start()[0];
byte second_instruction = stub->instruction_start()[2];
if (first_instruction == kTwoByteJumpInstruction) {
return INCREMENTAL;
}
ASSERT(first_instruction == kTwoByteNopInstruction);
if (second_instruction == kFiveByteJumpInstruction) {
return INCREMENTAL_COMPACTION;
}
ASSERT(second_instruction == kFiveByteNopInstruction);
return STORE_BUFFER_ONLY;
}
static void Patch(Code* stub, Mode mode) {
switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
stub->instruction_start()[0] = kTwoByteNopInstruction;
stub->instruction_start()[2] = kFiveByteNopInstruction;
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
stub->instruction_start()[0] = kTwoByteJumpInstruction;
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
stub->instruction_start()[0] = kTwoByteNopInstruction;
stub->instruction_start()[2] = kFiveByteJumpInstruction;
break;
}
ASSERT(GetMode(stub) == mode);
CPU::FlushICache(stub->instruction_start(), 7);
}
private:
// This is a helper class for freeing up 3 scratch registers, where the third
// is always ecx (needed for shift operations). The input is two registers
// that must be preserved and one scratch register provided by the caller.
class RegisterAllocation {
public:
RegisterAllocation(Register object,
Register address,
Register scratch0)
: object_orig_(object),
address_orig_(address),
scratch0_orig_(scratch0),
object_(object),
address_(address),
scratch0_(scratch0) {
ASSERT(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
if (scratch0.is(ecx)) {
scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
}
if (object.is(ecx)) {
object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
}
if (address.is(ecx)) {
address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
}
ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
}
void Save(MacroAssembler* masm) {
ASSERT(!address_orig_.is(object_));
ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
// We don't have to save scratch0_orig_ because it was given to us as
// a scratch register. But if we had to switch to a different reg then
// we should save the new scratch0_.
if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
if (!ecx.is(scratch0_orig_) &&
!ecx.is(object_orig_) &&
!ecx.is(address_orig_)) {
masm->push(ecx);
}
masm->push(scratch1_);
if (!address_.is(address_orig_)) {
masm->push(address_);
masm->mov(address_, address_orig_);
}
if (!object_.is(object_orig_)) {
masm->push(object_);
masm->mov(object_, object_orig_);
}
}
void Restore(MacroAssembler* masm) {
// These will have been preserved the entire time, so we just need to move
// them back. Only in one case is the orig_ reg different from the plain
// one, since only one of them can alias with ecx.
if (!object_.is(object_orig_)) {
masm->mov(object_orig_, object_);
masm->pop(object_);
}
if (!address_.is(address_orig_)) {
masm->mov(address_orig_, address_);
masm->pop(address_);
}
masm->pop(scratch1_);
if (!ecx.is(scratch0_orig_) &&
!ecx.is(object_orig_) &&
!ecx.is(address_orig_)) {
masm->pop(ecx);
}
if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
}
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved. The caller saved
// registers are eax, ecx and edx. The three scratch registers (incl. ecx)
// will be restored by other means so we don't bother pushing them here.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(SSE2);
masm->sub(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
// Save all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
}
}
}
inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(SSE2);
// Restore all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
}
masm->add(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
}
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
}
inline Register object() { return object_; }
inline Register address() { return address_; }
inline Register scratch0() { return scratch0_; }
inline Register scratch1() { return scratch1_; }
private:
Register object_orig_;
Register address_orig_;
Register scratch0_orig_;
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
// Third scratch register is always ecx.
Register GetRegThatIsNotEcxOr(Register r1,
Register r2,
Register r3) {
for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(ecx)) continue;
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
if (candidate.is(r3)) continue;
return candidate;
}
UNREACHABLE();
return no_reg;
}
friend class RecordWriteStub;
};
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
void Generate(MacroAssembler* masm);
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
Major MajorKey() { return RecordWrite; }
int MinorKey() {
return ObjectBits::encode(object_.code()) |
ValueBits::encode(value_.code()) |
AddressBits::encode(address_.code()) |
RememberedSetActionBits::encode(remembered_set_action_) |
SaveFPRegsModeBits::encode(save_fp_regs_mode_);
}
bool MustBeInStubCache() {
// All stubs must be registered in the stub cache
// otherwise IncrementalMarker would not be able to find
// and patch it.
return true;
}
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
class ObjectBits: public BitField<int, 0, 3> {};
class ValueBits: public BitField<int, 3, 3> {};
class AddressBits: public BitField<int, 6, 3> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
Register object_;
Register value_;
Register address_;
RememberedSetAction remembered_set_action_;
SaveFPRegsMode save_fp_regs_mode_;
RegisterAllocation regs_;
};
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_IA32_CODE_STUBS_IA32_H_ #endif // V8_IA32_CODE_STUBS_IA32_H_
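The RecordWriteStub removed above switches behaviour by patching its own first bytes. As GetMode/Patch show, the stub keeps a 2-byte slot at offset 0 and a 5-byte slot at offset 2; each slot is either an inert compare, which only touches flags and so acts as a nop, or a jmp into the incremental-marking path. A rough standalone sketch of the mode decoding, not the V8 code itself:

#include <cassert>
#include <cstdint>

enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

// Sketch: 'code' points at the first instruction of a generated stub.
Mode GetRecordWriteMode(const uint8_t* code) {
  const uint8_t kTwoByteNop  = 0x3C;  // cmp al, imm8  -- flags only, acts as a nop
  const uint8_t kTwoByteJmp  = 0xEB;  // jmp rel8
  const uint8_t kFiveByteNop = 0x3D;  // cmp eax, imm32
  const uint8_t kFiveByteJmp = 0xE9;  // jmp rel32
  if (code[0] == kTwoByteJmp) return INCREMENTAL;
  assert(code[0] == kTwoByteNop);
  if (code[2] == kFiveByteJmp) return INCREMENTAL_COMPACTION;
  assert(code[2] == kFiveByteNop);
  return STORE_BUFFER_ONLY;
}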

46
deps/v8/src/ia32/codegen-ia32.cc

@ -39,16 +39,12 @@ namespace internal {
// Platform-specific RuntimeCallHelper functions. // Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL); masm->EnterInternalFrame();
ASSERT(!masm->has_frame());
masm->set_has_frame(true);
} }
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL); masm->LeaveInternalFrame();
ASSERT(masm->has_frame());
masm->set_has_frame(false);
} }
@ -112,14 +108,14 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ mov(edx, dst); __ mov(edx, dst);
__ and_(edx, 0xF); __ and_(edx, 0xF);
__ neg(edx); __ neg(edx);
__ add(edx, Immediate(16)); __ add(Operand(edx), Immediate(16));
__ add(dst, edx); __ add(dst, Operand(edx));
__ add(src, edx); __ add(src, Operand(edx));
__ sub(count, edx); __ sub(Operand(count), edx);
// edi is now aligned. Check if esi is also aligned. // edi is now aligned. Check if esi is also aligned.
Label unaligned_source; Label unaligned_source;
__ test(src, Immediate(0x0F)); __ test(Operand(src), Immediate(0x0F));
__ j(not_zero, &unaligned_source); __ j(not_zero, &unaligned_source);
{ {
// Copy loop for aligned source and destination. // Copy loop for aligned source and destination.
@ -134,11 +130,11 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ prefetch(Operand(src, 0x20), 1); __ prefetch(Operand(src, 0x20), 1);
__ movdqa(xmm0, Operand(src, 0x00)); __ movdqa(xmm0, Operand(src, 0x00));
__ movdqa(xmm1, Operand(src, 0x10)); __ movdqa(xmm1, Operand(src, 0x10));
__ add(src, Immediate(0x20)); __ add(Operand(src), Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0); __ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1); __ movdqa(Operand(dst, 0x10), xmm1);
__ add(dst, Immediate(0x20)); __ add(Operand(dst), Immediate(0x20));
__ dec(loop_count); __ dec(loop_count);
__ j(not_zero, &loop); __ j(not_zero, &loop);
@ -146,12 +142,12 @@ OS::MemCopyFunction CreateMemCopyFunction() {
// At most 31 bytes to copy. // At most 31 bytes to copy.
Label move_less_16; Label move_less_16;
__ test(count, Immediate(0x10)); __ test(Operand(count), Immediate(0x10));
__ j(zero, &move_less_16); __ j(zero, &move_less_16);
__ movdqa(xmm0, Operand(src, 0)); __ movdqa(xmm0, Operand(src, 0));
__ add(src, Immediate(0x10)); __ add(Operand(src), Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0); __ movdqa(Operand(dst, 0), xmm0);
__ add(dst, Immediate(0x10)); __ add(Operand(dst), Immediate(0x10));
__ bind(&move_less_16); __ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string. // At most 15 bytes to copy. Copy 16 bytes at end of string.
@ -180,11 +176,11 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ prefetch(Operand(src, 0x20), 1); __ prefetch(Operand(src, 0x20), 1);
__ movdqu(xmm0, Operand(src, 0x00)); __ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10)); __ movdqu(xmm1, Operand(src, 0x10));
__ add(src, Immediate(0x20)); __ add(Operand(src), Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0); __ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1); __ movdqa(Operand(dst, 0x10), xmm1);
__ add(dst, Immediate(0x20)); __ add(Operand(dst), Immediate(0x20));
__ dec(loop_count); __ dec(loop_count);
__ j(not_zero, &loop); __ j(not_zero, &loop);
@ -192,12 +188,12 @@ OS::MemCopyFunction CreateMemCopyFunction() {
// At most 31 bytes to copy. // At most 31 bytes to copy.
Label move_less_16; Label move_less_16;
__ test(count, Immediate(0x10)); __ test(Operand(count), Immediate(0x10));
__ j(zero, &move_less_16); __ j(zero, &move_less_16);
__ movdqu(xmm0, Operand(src, 0)); __ movdqu(xmm0, Operand(src, 0));
__ add(src, Immediate(0x10)); __ add(Operand(src), Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0); __ movdqa(Operand(dst, 0), xmm0);
__ add(dst, Immediate(0x10)); __ add(Operand(dst), Immediate(0x10));
__ bind(&move_less_16); __ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string. // At most 15 bytes to copy. Copy 16 bytes at end of string.
@ -232,10 +228,10 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ mov(edx, dst); __ mov(edx, dst);
__ and_(edx, 0x03); __ and_(edx, 0x03);
__ neg(edx); __ neg(edx);
__ add(edx, Immediate(4)); // edx = 4 - (dst & 3) __ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3)
__ add(dst, edx); __ add(dst, Operand(edx));
__ add(src, edx); __ add(src, Operand(edx));
__ sub(count, edx); __ sub(Operand(count), edx);
// edi is now aligned, ecx holds number of remaining bytes to copy. // edi is now aligned, ecx holds number of remaining bytes to copy.
__ mov(edx, count); __ mov(edx, count);

95
deps/v8/src/ia32/debug-ia32.cc

@ -100,64 +100,63 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList non_object_regs, RegList non_object_regs,
bool convert_call_to_jmp) { bool convert_call_to_jmp) {
// Enter an internal frame. // Enter an internal frame.
{ __ EnterInternalFrame();
FrameScope scope(masm, StackFrame::INTERNAL);
// Store the registers containing live values on the expression stack to
// Store the registers containing live values on the expression stack to // make sure that these are correctly updated during GC. Non object values
// make sure that these are correctly updated during GC. Non object values // are stored as a smi causing it to be untouched by GC.
// are stored as a smi causing it to be untouched by GC. ASSERT((object_regs & ~kJSCallerSaved) == 0);
ASSERT((object_regs & ~kJSCallerSaved) == 0); ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
ASSERT((non_object_regs & ~kJSCallerSaved) == 0); ASSERT((object_regs & non_object_regs) == 0);
ASSERT((object_regs & non_object_regs) == 0); for (int i = 0; i < kNumJSCallerSaved; i++) {
for (int i = 0; i < kNumJSCallerSaved; i++) { int r = JSCallerSavedCode(i);
int r = JSCallerSavedCode(i); Register reg = { r };
Register reg = { r }; if ((object_regs & (1 << r)) != 0) {
if ((object_regs & (1 << r)) != 0) { __ push(reg);
__ push(reg); }
} if ((non_object_regs & (1 << r)) != 0) {
if ((non_object_regs & (1 << r)) != 0) { if (FLAG_debug_code) {
if (FLAG_debug_code) { __ test(reg, Immediate(0xc0000000));
__ test(reg, Immediate(0xc0000000)); __ Assert(zero, "Unable to encode value as smi");
__ Assert(zero, "Unable to encode value as smi");
}
__ SmiTag(reg);
__ push(reg);
} }
__ SmiTag(reg);
__ push(reg);
} }
}
#ifdef DEBUG #ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over"); __ RecordComment("// Calling from debug break to runtime - come in - over");
#endif #endif
__ Set(eax, Immediate(0)); // No arguments. __ Set(eax, Immediate(0)); // No arguments.
__ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate()))); __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(1); CEntryStub ceb(1);
__ CallStub(&ceb); __ CallStub(&ceb);
// Restore the register values containing object pointers from the // Restore the register values containing object pointers from the expression
// expression stack. // stack.
for (int i = kNumJSCallerSaved; --i >= 0;) { for (int i = kNumJSCallerSaved; --i >= 0;) {
int r = JSCallerSavedCode(i); int r = JSCallerSavedCode(i);
Register reg = { r }; Register reg = { r };
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ Set(reg, Immediate(kDebugZapValue)); __ Set(reg, Immediate(kDebugZapValue));
} }
if ((object_regs & (1 << r)) != 0) { if ((object_regs & (1 << r)) != 0) {
__ pop(reg); __ pop(reg);
} }
if ((non_object_regs & (1 << r)) != 0) { if ((non_object_regs & (1 << r)) != 0) {
__ pop(reg); __ pop(reg);
__ SmiUntag(reg); __ SmiUntag(reg);
}
} }
// Get rid of the internal frame.
} }
// Get rid of the internal frame.
__ LeaveInternalFrame();
// If this call did not replace a call but patched other code then there will // If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that. // be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) { if (convert_call_to_jmp) {
__ add(esp, Immediate(kPointerSize)); __ add(Operand(esp), Immediate(kPointerSize));
} }
// Now that the break point has been handled, resume normal execution by // Now that the break point has been handled, resume normal execution by
@ -299,7 +298,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ lea(edx, FieldOperand(edx, Code::kHeaderSize)); __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
// Re-run JSFunction, edi is function, esi is context. // Re-run JSFunction, edi is function, esi is context.
__ jmp(edx); __ jmp(Operand(edx));
} }
const bool Debug::kFrameDropperSupported = true; const bool Debug::kFrameDropperSupported = true;
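In Generate_DebugBreakCallHelper above, registers holding non-object values are pushed as smis so the GC will not mistake them for pointers while the debug-break runtime call runs: the helper asserts that the top two bits are clear, tags with a one-bit left shift, and untags with an arithmetic right shift on the way out. A small sketch of that round trip, in plain C++ with assumed names:

#include <cassert>
#include <cstdint>

// Sketch: ia32 smi tagging as used by the debug-break helper above.
// A value with bits 31 and 30 clear survives the shl/sar round trip, and the
// tagged form has a zero low bit, so it reads as a small integer to the GC.
inline uint32_t SmiTag(uint32_t value) {
  assert((value & 0xC0000000u) == 0);  // mirrors the test(reg, 0xc0000000) check
  return value << 1;
}

inline uint32_t SmiUntag(uint32_t smi) {
  return static_cast<uint32_t>(static_cast<int32_t>(smi) >> 1);  // sar by 1
}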

100
deps/v8/src/ia32/deoptimizer-ia32.cc

@ -116,7 +116,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
new_reloc->GetDataStartAddress() + padding, 0); new_reloc->GetDataStartAddress() + padding, 0);
intptr_t comment_string intptr_t comment_string
= reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString); = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL); RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string);
for (int i = 0; i < additional_comments; ++i) { for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG #ifdef DEBUG
byte* pos_before = reloc_info_writer.pos(); byte* pos_before = reloc_info_writer.pos();
@ -174,8 +174,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// We use RUNTIME_ENTRY for deoptimization bailouts. // We use RUNTIME_ENTRY for deoptimization bailouts.
RelocInfo rinfo(curr_address + 1, // 1 after the call opcode. RelocInfo rinfo(curr_address + 1, // 1 after the call opcode.
RelocInfo::RUNTIME_ENTRY, RelocInfo::RUNTIME_ENTRY,
reinterpret_cast<intptr_t>(deopt_entry), reinterpret_cast<intptr_t>(deopt_entry));
NULL);
reloc_info_writer.Write(&rinfo); reloc_info_writer.Write(&rinfo);
ASSERT_GE(reloc_info_writer.pos(), ASSERT_GE(reloc_info_writer.pos(),
reloc_info->address() + ByteArray::kHeaderSize); reloc_info->address() + ByteArray::kHeaderSize);
@ -206,11 +205,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
node->set_next(data->deoptimizing_code_list_); node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node; data->deoptimizing_code_list_ = node;
// We might be in the middle of incremental marking with compaction.
// Tell collector to treat this code object in a special way and
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
// Set the code for the function to non-optimized version. // Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code()); function->ReplaceCode(function->shared()->code());
@ -227,8 +221,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
} }
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Address pc_after,
Code* check_code, Code* check_code,
Code* replacement_code) { Code* replacement_code) {
Address call_target_address = pc_after - kIntSize; Address call_target_address = pc_after - kIntSize;
@ -257,13 +250,6 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
*(call_target_address - 2) = 0x90; // nop *(call_target_address - 2) = 0x90; // nop
Assembler::set_target_address_at(call_target_address, Assembler::set_target_address_at(call_target_address,
replacement_code->entry()); replacement_code->entry());
RelocInfo rinfo(call_target_address,
RelocInfo::CODE_TARGET,
0,
unoptimized_code);
unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
unoptimized_code, &rinfo, replacement_code);
} }
@ -282,9 +268,6 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
*(call_target_address - 2) = 0x07; // offset *(call_target_address - 2) = 0x07; // offset
Assembler::set_target_address_at(call_target_address, Assembler::set_target_address_at(call_target_address,
check_code->entry()); check_code->entry());
check_code->GetHeap()->incremental_marking()->
RecordCodeTargetPatch(call_target_address, check_code);
} }
@ -432,14 +415,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_)); output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else { } else {
// Setup the frame pointer and the context pointer. // Setup the frame pointer and the context pointer.
// All OSR stack frames are dynamically aligned to an 8-byte boundary. output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
int frame_pointer = input_->GetRegister(ebp.code());
if ((frame_pointer & 0x4) == 0) {
// Return address at FP + 4 should be aligned, so FP mod 8 should be 4.
frame_pointer -= kPointerSize;
has_alignment_padding_ = 1;
}
output_[0]->SetRegister(ebp.code(), frame_pointer);
output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code())); output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
unsigned pc_offset = data->OsrPcOffset()->value(); unsigned pc_offset = data->OsrPcOffset()->value();
@ -504,11 +480,9 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// top address and the current frame's size. // top address and the current frame's size.
uint32_t top_address; uint32_t top_address;
if (is_bottommost) { if (is_bottommost) {
// If the optimized frame had alignment padding, adjust the frame pointer // 2 = context and function in the frame.
// to point to the new position of the old frame pointer after padding top_address =
// is removed. Subtract 2 * kPointerSize for the context and function slots. input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
height_in_bytes + has_alignment_padding_ * kPointerSize;
} else { } else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size; top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
} }
@ -559,9 +533,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
} }
output_frame->SetFrameSlot(output_offset, value); output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset; intptr_t fp_value = top_address + output_offset;
ASSERT(!is_bottommost || ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize
== fp_value);
output_frame->SetFp(fp_value); output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value); if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
if (FLAG_trace_deopt) { if (FLAG_trace_deopt) {
@ -666,7 +638,7 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize * const int kDoubleRegsSize = kDoubleSize *
XMMRegister::kNumAllocatableRegisters; XMMRegister::kNumAllocatableRegisters;
__ sub(esp, Immediate(kDoubleRegsSize)); __ sub(Operand(esp), Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize; int offset = i * kDoubleSize;
@ -690,7 +662,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize)); __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize)); __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
} }
__ sub(edx, ebp); __ sub(edx, Operand(ebp));
__ neg(edx); __ neg(edx);
// Allocate a new deoptimizer object. // Allocate a new deoptimizer object.
@ -703,10 +675,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta. __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta.
__ mov(Operand(esp, 5 * kPointerSize), __ mov(Operand(esp, 5 * kPointerSize),
Immediate(ExternalReference::isolate_address())); Immediate(ExternalReference::isolate_address()));
{ __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
}
// Preserve deoptimizer object in register eax and get the input // Preserve deoptimizer object in register eax and get the input
// frame descriptor pointer. // frame descriptor pointer.
@ -729,15 +698,15 @@ void Deoptimizer::EntryGenerator::Generate() {
// Remove the bailout id and the double registers from the stack. // Remove the bailout id and the double registers from the stack.
if (type() == EAGER) { if (type() == EAGER) {
__ add(esp, Immediate(kDoubleRegsSize + kPointerSize)); __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
} else { } else {
__ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize)); __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
} }
// Compute a pointer to the unwinding limit in register ecx; that is // Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame. // the first stack slot not part of the input frame.
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
__ add(ecx, esp); __ add(ecx, Operand(esp));
// Unwind the stack down to - but not including - the unwinding // Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input // limit and copy the contents of the activation frame to the input
@ -746,43 +715,18 @@ void Deoptimizer::EntryGenerator::Generate() {
Label pop_loop; Label pop_loop;
__ bind(&pop_loop); __ bind(&pop_loop);
__ pop(Operand(edx, 0)); __ pop(Operand(edx, 0));
__ add(edx, Immediate(sizeof(uint32_t))); __ add(Operand(edx), Immediate(sizeof(uint32_t)));
__ cmp(ecx, esp); __ cmp(ecx, Operand(esp));
__ j(not_equal, &pop_loop); __ j(not_equal, &pop_loop);
// If frame was dynamically aligned, pop padding.
Label sentinel, sentinel_done;
__ pop(ecx);
__ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
__ j(equal, &sentinel);
__ push(ecx);
__ jmp(&sentinel_done);
__ bind(&sentinel);
__ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
Immediate(1));
__ bind(&sentinel_done);
// Compute the output frame in the deoptimizer. // Compute the output frame in the deoptimizer.
__ push(eax); __ push(eax);
__ PrepareCallCFunction(1, ebx); __ PrepareCallCFunction(1, ebx);
__ mov(Operand(esp, 0 * kPointerSize), eax); __ mov(Operand(esp, 0 * kPointerSize), eax);
{ __ CallCFunction(
AllowExternalCallThatCantCauseGC scope(masm()); ExternalReference::compute_output_frames_function(isolate), 1);
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate), 1);
}
__ pop(eax); __ pop(eax);
if (type() == OSR) {
// If alignment padding is added, push the sentinel.
Label no_osr_padding;
__ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
Immediate(0));
__ j(equal, &no_osr_padding, Label::kNear);
__ push(Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
__ bind(&no_osr_padding);
}
// Replace the current frame with the output frames. // Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop; Label outer_push_loop, inner_push_loop;
// Outer loop state: eax = current FrameDescription**, edx = one past the // Outer loop state: eax = current FrameDescription**, edx = one past the
@ -795,12 +739,12 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(ebx, Operand(eax, 0)); __ mov(ebx, Operand(eax, 0));
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
__ bind(&inner_push_loop); __ bind(&inner_push_loop);
__ sub(ecx, Immediate(sizeof(uint32_t))); __ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
__ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset())); __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
__ test(ecx, ecx); __ test(ecx, Operand(ecx));
__ j(not_zero, &inner_push_loop); __ j(not_zero, &inner_push_loop);
__ add(eax, Immediate(kPointerSize)); __ add(Operand(eax), Immediate(kPointerSize));
__ cmp(eax, edx); __ cmp(eax, Operand(edx));
__ j(below, &outer_push_loop); __ j(below, &outer_push_loop);
// In case of OSR, we have to restore the XMM registers. // In case of OSR, we have to restore the XMM registers.
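The alignment logic deleted from DoComputeOsrOutputFrame follows the comment in the old code: the return address sits at fp + 4 and wants 8-byte alignment, so fp itself should be 4 mod 8; when the incoming fp is 0 mod 8, one word of padding is inserted and recorded in has_alignment_padding_. The same rule as a tiny sketch (assumed names, 32-bit pointers):

#include <cstdint>

struct AlignedFramePointer {
  uint32_t fp;
  bool padded;
};

// Sketch of the removed OSR frame-alignment rule.
inline AlignedFramePointer AlignOsrFramePointer(uint32_t fp) {
  const uint32_t kPointerSize = 4;
  if ((fp & 0x4) == 0) {
    // fp mod 8 == 0, so the return address at fp + 4 would be misaligned;
    // drop fp by one word and remember that padding was added.
    return { fp - kPointerSize, true };
  }
  return { fp, false };
}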

29
deps/v8/src/ia32/disasm-ia32.cc

@ -55,7 +55,6 @@ struct ByteMnemonic {
static const ByteMnemonic two_operands_instr[] = { static const ByteMnemonic two_operands_instr[] = {
{0x01, "add", OPER_REG_OP_ORDER},
{0x03, "add", REG_OPER_OP_ORDER}, {0x03, "add", REG_OPER_OP_ORDER},
{0x09, "or", OPER_REG_OP_ORDER}, {0x09, "or", OPER_REG_OP_ORDER},
{0x0B, "or", REG_OPER_OP_ORDER}, {0x0B, "or", REG_OPER_OP_ORDER},
@ -118,19 +117,6 @@ static const ByteMnemonic short_immediate_instr[] = {
}; };
// Generally we don't want to generate these because they are subject to partial
// register stalls. They are included for completeness and because the cmp
// variant is used by the RecordWrite stub. Because it does not update the
// register it is not subject to partial register stalls.
static ByteMnemonic byte_immediate_instr[] = {
{0x0c, "or", UNSET_OP_ORDER},
{0x24, "and", UNSET_OP_ORDER},
{0x34, "xor", UNSET_OP_ORDER},
{0x3c, "cmp", UNSET_OP_ORDER},
{-1, "", UNSET_OP_ORDER}
};
static const char* const jump_conditional_mnem[] = { static const char* const jump_conditional_mnem[] = {
/*0*/ "jo", "jno", "jc", "jnc", /*0*/ "jo", "jno", "jc", "jnc",
/*4*/ "jz", "jnz", "jna", "ja", /*4*/ "jz", "jnz", "jna", "ja",
@ -163,8 +149,7 @@ enum InstructionType {
REGISTER_INSTR, REGISTER_INSTR,
MOVE_REG_INSTR, MOVE_REG_INSTR,
CALL_JUMP_INSTR, CALL_JUMP_INSTR,
SHORT_IMMEDIATE_INSTR, SHORT_IMMEDIATE_INSTR
BYTE_IMMEDIATE_INSTR
}; };
@ -213,7 +198,6 @@ void InstructionTable::Init() {
CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR); CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
CopyTable(call_jump_instr, CALL_JUMP_INSTR); CopyTable(call_jump_instr, CALL_JUMP_INSTR);
CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR); CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR);
AddJumpConditionalShort(); AddJumpConditionalShort();
SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc"); SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec"); SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
@ -928,12 +912,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break; break;
} }
case BYTE_IMMEDIATE_INSTR: {
AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
data += 2;
break;
}
case NO_INSTR: case NO_INSTR:
processed = false; processed = false;
break; break;
@ -1368,6 +1346,11 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 2; data += 2;
break; break;
case 0x2C:
AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1));
data += 2;
break;
case 0xA9: case 0xA9:
AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1)); AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
data += 5; data += 5;
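The disassembler table changes above revolve around the one-byte accumulator forms: an opcode followed by an 8-bit immediate applied to al (0x0C or, 0x24 and, 0x2C sub, 0x34 xor, 0x3C cmp). A standalone sketch of decoding them, not the actual DisassemblerIA32 code:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Sketch: decode the two-byte "op al, imm8" forms referenced in this diff.
// Returns the number of bytes consumed, or 0 if the opcode is not one of them.
inline int DecodeAlImm8(const uint8_t* data, char* out, size_t out_size) {
  const char* mnem = nullptr;
  switch (data[0]) {
    case 0x0C: mnem = "or";  break;
    case 0x24: mnem = "and"; break;
    case 0x2C: mnem = "sub"; break;
    case 0x34: mnem = "xor"; break;
    case 0x3C: mnem = "cmp"; break;
    default:   return 0;
  }
  snprintf(out, out_size, "%s al,0x%x", mnem, data[1]);
  return 2;
}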

323
deps/v8/src/ia32/full-codegen-ia32.cc

@ -138,7 +138,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// function calls. // function calls.
if (info->is_strict_mode() || info->is_native()) { if (info->is_strict_mode() || info->is_native()) {
Label ok; Label ok;
__ test(ecx, ecx); __ test(ecx, Operand(ecx));
__ j(zero, &ok, Label::kNear); __ j(zero, &ok, Label::kNear);
// +1 for return address. // +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize; int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
@ -147,11 +147,6 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
__ bind(&ok); __ bind(&ok);
} }
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
__ push(ebp); // Caller's frame pointer. __ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp); __ mov(ebp, esp);
__ push(esi); // Callee's context. __ push(esi); // Callee's context.
@ -205,12 +200,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
// Store it in the context. // Store it in the context.
int context_offset = Context::SlotOffset(var->index()); int context_offset = Context::SlotOffset(var->index());
__ mov(Operand(esi, context_offset), eax); __ mov(Operand(esi, context_offset), eax);
// Update the write barrier. This clobbers eax and ebx. // Update the write barrier. This clobbers all involved
__ RecordWriteContextSlot(esi, // registers, so we have to use a third register to avoid
context_offset, // clobbering esi.
eax, __ mov(ecx, esi);
ebx, __ RecordWrite(ecx, context_offset, eax, ebx);
kDontSaveFPRegs);
} }
} }
} }
@ -371,10 +365,10 @@ void FullCodeGenerator::EmitReturnSequence() {
void FullCodeGenerator::verify_stack_height() { void FullCodeGenerator::verify_stack_height() {
ASSERT(FLAG_verify_stack_height); ASSERT(FLAG_verify_stack_height);
__ sub(ebp, Immediate(kPointerSize * stack_height())); __ sub(Operand(ebp), Immediate(kPointerSize * stack_height()));
__ cmp(ebp, esp); __ cmp(ebp, Operand(esp));
__ Assert(equal, "Full codegen stack height not as expected."); __ Assert(equal, "Full codegen stack height not as expected.");
__ add(ebp, Immediate(kPointerSize * stack_height())); __ add(Operand(ebp), Immediate(kPointerSize * stack_height()));
} }
@ -603,7 +597,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
ToBooleanStub stub(result_register()); ToBooleanStub stub(result_register());
__ push(result_register()); __ push(result_register());
__ CallStub(&stub, condition->test_id()); __ CallStub(&stub, condition->test_id());
__ test(result_register(), result_register()); __ test(result_register(), Operand(result_register()));
// The stub returns nonzero for true. // The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through); Split(not_zero, if_true, if_false, fall_through);
} }
@ -667,12 +661,11 @@ void FullCodeGenerator::SetVar(Variable* var,
ASSERT(!scratch1.is(src)); ASSERT(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0); MemOperand location = VarOperand(var, scratch0);
__ mov(location, src); __ mov(location, src);
// Emit the write barrier code if the location is in the heap. // Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) { if (var->IsContextSlot()) {
int offset = Context::SlotOffset(var->index()); int offset = Context::SlotOffset(var->index());
ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi)); ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
__ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs); __ RecordWrite(scratch0, offset, src, scratch1);
} }
} }
@ -745,14 +738,9 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
Comment cmnt(masm_, "[ Declaration"); Comment cmnt(masm_, "[ Declaration");
VisitForAccumulatorValue(function); VisitForAccumulatorValue(function);
__ mov(ContextOperand(esi, variable->index()), result_register()); __ mov(ContextOperand(esi, variable->index()), result_register());
// We know that we have written a function, which is not a smi. int offset = Context::SlotOffset(variable->index());
__ RecordWriteContextSlot(esi, __ mov(ebx, esi);
Context::SlotOffset(variable->index()), __ RecordWrite(ebx, offset, result_register(), ecx);
result_register(),
ecx,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS); PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
} else if (mode == Variable::CONST || mode == Variable::LET) { } else if (mode == Variable::CONST || mode == Variable::LET) {
Comment cmnt(masm_, "[ Declaration"); Comment cmnt(masm_, "[ Declaration");
@ -847,10 +835,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
if (inline_smi_code) { if (inline_smi_code) {
Label slow_case; Label slow_case;
__ mov(ecx, edx); __ mov(ecx, edx);
__ or_(ecx, eax); __ or_(ecx, Operand(eax));
patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear); patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
__ cmp(edx, eax); __ cmp(edx, Operand(eax));
__ j(not_equal, &next_test); __ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed. __ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target()); __ jmp(clause->body_target());
@ -862,7 +850,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT); Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
__ call(ic, RelocInfo::CODE_TARGET, clause->CompareId()); __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
__ test(eax, eax); __ test(eax, Operand(eax));
__ j(not_equal, &next_test); __ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed. __ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target()); __ jmp(clause->body_target());
@ -951,7 +939,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// For all objects but the receiver, check that the cache is empty. // For all objects but the receiver, check that the cache is empty.
Label check_prototype; Label check_prototype;
__ cmp(ecx, eax); __ cmp(ecx, Operand(eax));
__ j(equal, &check_prototype, Label::kNear); __ j(equal, &check_prototype, Label::kNear);
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset)); __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
__ cmp(edx, isolate()->factory()->empty_fixed_array()); __ cmp(edx, isolate()->factory()->empty_fixed_array());
@ -1033,9 +1021,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(ecx); // Enumerable. __ push(ecx); // Enumerable.
__ push(ebx); // Current entry. __ push(ebx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION); __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ test(eax, eax); __ test(eax, Operand(eax));
__ j(equal, loop_statement.continue_label()); __ j(equal, loop_statement.continue_label());
__ mov(ebx, eax); __ mov(ebx, Operand(eax));
// Update the 'each' property or variable from the possibly filtered // Update the 'each' property or variable from the possibly filtered
// entry in register ebx. // entry in register ebx.
@ -1059,7 +1047,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack. // Remove the pointers stored on the stack.
__ bind(loop_statement.break_label()); __ bind(loop_statement.break_label());
__ add(esp, Immediate(5 * kPointerSize)); __ add(Operand(esp), Immediate(5 * kPointerSize));
decrement_stack_height(ForIn::kElementCount); decrement_stack_height(ForIn::kElementCount);
// Exit and decrement the loop depth. // Exit and decrement the loop depth.
@ -1207,16 +1195,10 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == Variable::DYNAMIC_LOCAL) { } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed(); Variable* local = var->local_if_not_shadowed();
__ mov(eax, ContextSlotOperandCheckExtensions(local, slow)); __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == Variable::CONST ||
-        local->mode() == Variable::LET) {
+    if (local->mode() == Variable::CONST) {
       __ cmp(eax, isolate()->factory()->the_hole_value());
       __ j(not_equal, done);
-      if (local->mode() == Variable::CONST) {
-        __ mov(eax, isolate()->factory()->undefined_value());
-      } else { // Variable::LET
-        __ push(Immediate(var->name()));
-        __ CallRuntime(Runtime::kThrowReferenceError, 1);
-      }
+      __ mov(eax, isolate()->factory()->undefined_value());
} }
__ jmp(done); __ jmp(done);
} }
@ -1498,18 +1480,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize); int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ mov(FieldOperand(ebx, offset), result_register()); __ mov(FieldOperand(ebx, offset), result_register());
-    Label no_map_change;
-    __ JumpIfSmi(result_register(), &no_map_change);
     // Update the write barrier for the array store.
-    __ RecordWriteField(ebx, offset, result_register(), ecx,
-                        kDontSaveFPRegs,
-                        EMIT_REMEMBERED_SET,
-                        OMIT_SMI_CHECK);
-    __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
-    __ CheckFastSmiOnlyElements(edi, &no_map_change, Label::kNear);
-    __ push(Operand(esp, 0));
-    __ CallRuntime(Runtime::kNonSmiElementStored, 1);
-    __ bind(&no_map_change);
+    __ RecordWrite(ebx, offset, result_register(), ecx);
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS); PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
} }
@ -1669,7 +1641,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ pop(edx); __ pop(edx);
decrement_stack_height(); decrement_stack_height();
__ mov(ecx, eax); __ mov(ecx, eax);
__ or_(eax, edx); __ or_(eax, Operand(edx));
JumpPatchSite patch_site(masm_); JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear); patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
@ -1719,32 +1691,32 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
break; break;
} }
case Token::ADD: case Token::ADD:
__ add(eax, ecx); __ add(eax, Operand(ecx));
__ j(overflow, &stub_call); __ j(overflow, &stub_call);
break; break;
case Token::SUB: case Token::SUB:
__ sub(eax, ecx); __ sub(eax, Operand(ecx));
__ j(overflow, &stub_call); __ j(overflow, &stub_call);
break; break;
case Token::MUL: { case Token::MUL: {
__ SmiUntag(eax); __ SmiUntag(eax);
__ imul(eax, ecx); __ imul(eax, Operand(ecx));
__ j(overflow, &stub_call); __ j(overflow, &stub_call);
__ test(eax, eax); __ test(eax, Operand(eax));
__ j(not_zero, &done, Label::kNear); __ j(not_zero, &done, Label::kNear);
__ mov(ebx, edx); __ mov(ebx, edx);
__ or_(ebx, ecx); __ or_(ebx, Operand(ecx));
__ j(negative, &stub_call); __ j(negative, &stub_call);
break; break;
} }
case Token::BIT_OR: case Token::BIT_OR:
__ or_(eax, ecx); __ or_(eax, Operand(ecx));
break; break;
case Token::BIT_AND: case Token::BIT_AND:
__ and_(eax, ecx); __ and_(eax, Operand(ecx));
break; break;
case Token::BIT_XOR: case Token::BIT_XOR:
__ xor_(eax, ecx); __ xor_(eax, Operand(ecx));
break; break;
default: default:
UNREACHABLE(); UNREACHABLE();
@ -1887,8 +1859,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(location, eax); __ mov(location, eax);
if (var->IsContextSlot()) { if (var->IsContextSlot()) {
__ mov(edx, eax); __ mov(edx, eax);
-      int offset = Context::SlotOffset(var->index());
-      __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
+      __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
} }
} }
@ -1906,8 +1877,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(location, eax); __ mov(location, eax);
if (var->IsContextSlot()) { if (var->IsContextSlot()) {
__ mov(edx, eax); __ mov(edx, eax);
-      int offset = Context::SlotOffset(var->index());
-      __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
+      __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
} }
} else { } else {
ASSERT(var->IsLookupSlot()); ASSERT(var->IsLookupSlot());
@ -2099,29 +2069,8 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
} }
// Record source position for debugger. // Record source position for debugger.
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
-  // Record call targets in unoptimized code, but not in the snapshot.
-  bool record_call_target = !Serializer::enabled();
-  if (record_call_target) {
-    flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
-  }
   CallFunctionStub stub(arg_count, flags);
   __ CallStub(&stub);
-  if (record_call_target) {
-    // There is a one element cache in the instruction stream.
-#ifdef DEBUG
-    int return_site_offset = masm()->pc_offset();
-#endif
-    Handle<Object> uninitialized =
-        CallFunctionStub::UninitializedSentinel(isolate());
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
-    __ test(eax, Immediate(cell));
-    // Patching code in the stub assumes the opcode is 1 byte and there is
-    // word for a pointer in the operand.
-    ASSERT(masm()->pc_offset() - return_site_offset >= 1 + kPointerSize);
-  }
RecordJSReturnSite(expr); RecordJSReturnSite(expr);
// Restore context register. // Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@ -2489,9 +2438,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
STATIC_ASSERT(kPointerSize == 4); STATIC_ASSERT(kPointerSize == 4);
__ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize)); __ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
// Calculate location of the first key name. // Calculate location of the first key name.
__ add(ebx, __ add(Operand(ebx),
Immediate(FixedArray::kHeaderSize + Immediate(FixedArray::kHeaderSize +
DescriptorArray::kFirstIndex * kPointerSize)); DescriptorArray::kFirstIndex * kPointerSize));
// Loop through all the keys in the descriptor array. If one of these is the // Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf the result is false. // symbol valueOf the result is false.
Label entry, loop; Label entry, loop;
@ -2500,9 +2449,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ mov(edx, FieldOperand(ebx, 0)); __ mov(edx, FieldOperand(ebx, 0));
__ cmp(edx, FACTORY->value_of_symbol()); __ cmp(edx, FACTORY->value_of_symbol());
__ j(equal, if_false); __ j(equal, if_false);
__ add(ebx, Immediate(kPointerSize)); __ add(Operand(ebx), Immediate(kPointerSize));
__ bind(&entry); __ bind(&entry);
__ cmp(ebx, ecx); __ cmp(ebx, Operand(ecx));
__ j(not_equal, &loop); __ j(not_equal, &loop);
// Reload map as register ebx was used as temporary above. // Reload map as register ebx was used as temporary above.
@ -2642,7 +2591,7 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
__ pop(ebx); __ pop(ebx);
decrement_stack_height(); decrement_stack_height();
__ cmp(eax, ebx); __ cmp(eax, Operand(ebx));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through); Split(equal, if_true, if_false, fall_through);
@ -2698,24 +2647,20 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
// Check that the object is a JS object but take special care of JS // Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class. // functions to make sure they have 'Function' as their class.
-  // Assume that there are only two callable types, and one of them is at
-  // either end of the type range for JS object types. Saves extra comparisons.
-  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
   // Map is now in eax.
   __ j(below, &null);
-  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                FIRST_SPEC_OBJECT_TYPE + 1);
-  __ j(equal, &function);
-  __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_SPEC_OBJECT_TYPE - 1);
-  __ j(equal, &function);
-  // Assume that there is no larger type.
-  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-  // Check if the constructor in the map is a function.
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+  __ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+  __ j(above_equal, &function);
+  // Check if the constructor in the map is a JS function.
__ mov(eax, FieldOperand(eax, Map::kConstructorOffset)); __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx); __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, &non_function_constructor); __ j(not_equal, &non_function_constructor);
@ -2796,8 +2741,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
if (CpuFeatures::IsSupported(SSE2)) { if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2); CpuFeatures::Scope fscope(SSE2);
__ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single. __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(xmm1, ebx); __ movd(xmm1, Operand(ebx));
__ movd(xmm0, eax); __ movd(xmm0, Operand(eax));
__ cvtss2sd(xmm1, xmm1); __ cvtss2sd(xmm1, xmm1);
__ xorps(xmm0, xmm1); __ xorps(xmm0, xmm1);
__ subsd(xmm0, xmm1); __ subsd(xmm0, xmm1);
@ -2898,11 +2843,10 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
// Store the value. // Store the value.
__ mov(FieldOperand(ebx, JSValue::kValueOffset), eax); __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
// Update the write barrier. Save the value as it will be // Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward. // overwritten by the write barrier code and is needed afterward.
__ mov(edx, eax); __ mov(edx, eax);
__ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs); __ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx);
__ bind(&done); __ bind(&done);
context()->Plug(eax); context()->Plug(eax);
@ -3175,14 +3119,14 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ mov(index_1, Operand(esp, 1 * kPointerSize)); __ mov(index_1, Operand(esp, 1 * kPointerSize));
__ mov(index_2, Operand(esp, 0)); __ mov(index_2, Operand(esp, 0));
__ mov(temp, index_1); __ mov(temp, index_1);
__ or_(temp, index_2); __ or_(temp, Operand(index_2));
__ JumpIfNotSmi(temp, &slow_case); __ JumpIfNotSmi(temp, &slow_case);
// Check that both indices are valid. // Check that both indices are valid.
__ mov(temp, FieldOperand(object, JSArray::kLengthOffset)); __ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
__ cmp(temp, index_1); __ cmp(temp, Operand(index_1));
__ j(below_equal, &slow_case); __ j(below_equal, &slow_case);
__ cmp(temp, index_2); __ cmp(temp, Operand(index_2));
__ j(below_equal, &slow_case); __ j(below_equal, &slow_case);
// Bring addresses into index1 and index2. // Bring addresses into index1 and index2.
@ -3195,35 +3139,16 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ mov(Operand(index_2, 0), object); __ mov(Operand(index_2, 0), object);
__ mov(Operand(index_1, 0), temp); __ mov(Operand(index_1, 0), temp);
-  Label no_remembered_set;
-  __ CheckPageFlag(elements,
-                   temp,
-                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                   not_zero,
-                   &no_remembered_set,
-                   Label::kNear);
-  // Possible optimization: do a check that both values are Smis
-  // (or them and test against Smi mask.)
-  // We are swapping two objects in an array and the incremental marker never
-  // pauses in the middle of scanning a single object. Therefore the
-  // incremental marker is not disturbed, so we don't need to call the
-  // RecordWrite stub that notifies the incremental marker.
-  __ RememberedSetHelper(elements,
-                         index_1,
-                         temp,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-  __ RememberedSetHelper(elements,
-                         index_2,
-                         temp,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-  __ bind(&no_remembered_set);
+  Label new_space;
+  __ InNewSpace(elements, temp, equal, &new_space);
+  __ mov(object, elements);
+  __ RecordWriteHelper(object, index_1, temp);
+  __ RecordWriteHelper(elements, index_2, temp);
+  __ bind(&new_space);
// We are done. Drop elements from the stack, and return undefined. // We are done. Drop elements from the stack, and return undefined.
__ add(esp, Immediate(3 * kPointerSize)); __ add(Operand(esp), Immediate(3 * kPointerSize));
__ mov(eax, isolate()->factory()->undefined_value()); __ mov(eax, isolate()->factory()->undefined_value());
__ jmp(&done); __ jmp(&done);
@ -3296,11 +3221,11 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
__ pop(left); __ pop(left);
Label done, fail, ok; Label done, fail, ok;
__ cmp(left, right); __ cmp(left, Operand(right));
__ j(equal, &ok); __ j(equal, &ok);
// Fail if either is a non-HeapObject. // Fail if either is a non-HeapObject.
__ mov(tmp, left); __ mov(tmp, left);
__ and_(tmp, right); __ and_(Operand(tmp), right);
__ JumpIfSmi(tmp, &fail); __ JumpIfSmi(tmp, &fail);
__ mov(tmp, FieldOperand(left, HeapObject::kMapOffset)); __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
__ CmpInstanceType(tmp, JS_REGEXP_TYPE); __ CmpInstanceType(tmp, JS_REGEXP_TYPE);
@ -3391,7 +3316,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
Operand separator_operand = Operand(esp, 2 * kPointerSize); Operand separator_operand = Operand(esp, 2 * kPointerSize);
Operand result_operand = Operand(esp, 1 * kPointerSize); Operand result_operand = Operand(esp, 1 * kPointerSize);
Operand array_length_operand = Operand(esp, 0); Operand array_length_operand = Operand(esp, 0);
__ sub(esp, Immediate(2 * kPointerSize)); __ sub(Operand(esp), Immediate(2 * kPointerSize));
__ cld(); __ cld();
// Check that the array is a JSArray // Check that the array is a JSArray
__ JumpIfSmi(array, &bailout); __ JumpIfSmi(array, &bailout);
@ -3427,7 +3352,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
// Live loop registers: index, array_length, string, // Live loop registers: index, array_length, string,
// scratch, string_length, elements. // scratch, string_length, elements.
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ cmp(index, array_length); __ cmp(index, Operand(array_length));
__ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin"); __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
} }
__ bind(&loop); __ bind(&loop);
@ -3445,8 +3370,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ add(string_length, __ add(string_length,
FieldOperand(string, SeqAsciiString::kLengthOffset)); FieldOperand(string, SeqAsciiString::kLengthOffset));
__ j(overflow, &bailout); __ j(overflow, &bailout);
__ add(index, Immediate(1)); __ add(Operand(index), Immediate(1));
__ cmp(index, array_length); __ cmp(index, Operand(array_length));
__ j(less, &loop); __ j(less, &loop);
// If array_length is 1, return elements[0], a string. // If array_length is 1, return elements[0], a string.
@ -3480,10 +3405,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
// to string_length. // to string_length.
__ mov(scratch, separator_operand); __ mov(scratch, separator_operand);
__ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset)); __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
__ sub(string_length, scratch); // May be negative, temporarily. __ sub(string_length, Operand(scratch)); // May be negative, temporarily.
__ imul(scratch, array_length_operand); __ imul(scratch, array_length_operand);
__ j(overflow, &bailout); __ j(overflow, &bailout);
__ add(string_length, scratch); __ add(string_length, Operand(scratch));
__ j(overflow, &bailout); __ j(overflow, &bailout);
__ shr(string_length, 1); __ shr(string_length, 1);
@ -3524,7 +3449,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string, __ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize)); FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch); __ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1)); __ add(Operand(index), Immediate(1));
__ bind(&loop_1_condition); __ bind(&loop_1_condition);
__ cmp(index, array_length_operand); __ cmp(index, array_length_operand);
__ j(less, &loop_1); // End while (index < length). __ j(less, &loop_1); // End while (index < length).
@ -3565,7 +3490,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string, __ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize)); FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch); __ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1)); __ add(Operand(index), Immediate(1));
__ cmp(index, array_length_operand); __ cmp(index, array_length_operand);
__ j(less, &loop_2); // End while (index < length). __ j(less, &loop_2); // End while (index < length).
@ -3606,7 +3531,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ lea(string, __ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize)); FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch); __ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1)); __ add(Operand(index), Immediate(1));
__ cmp(index, array_length_operand); __ cmp(index, array_length_operand);
__ j(less, &loop_3); // End while (index < length). __ j(less, &loop_3); // End while (index < length).
@ -3618,7 +3543,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ bind(&done); __ bind(&done);
__ mov(eax, result_operand); __ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register. // Drop temp values from the stack, and restore context register.
__ add(esp, Immediate(3 * kPointerSize)); __ add(Operand(esp), Immediate(3 * kPointerSize));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
decrement_stack_height(); decrement_stack_height();
@ -3898,9 +3823,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (ShouldInlineSmiCase(expr->op())) { if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) { if (expr->op() == Token::INC) {
__ add(eax, Immediate(Smi::FromInt(1))); __ add(Operand(eax), Immediate(Smi::FromInt(1)));
} else { } else {
__ sub(eax, Immediate(Smi::FromInt(1))); __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
} }
__ j(overflow, &stub_call, Label::kNear); __ j(overflow, &stub_call, Label::kNear);
// We could eliminate this smi check if we split the code at // We could eliminate this smi check if we split the code at
@ -3910,9 +3835,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call); __ bind(&stub_call);
// Call stub. Undo operation first. // Call stub. Undo operation first.
if (expr->op() == Token::INC) { if (expr->op() == Token::INC) {
__ sub(eax, Immediate(Smi::FromInt(1))); __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
} else { } else {
__ add(eax, Immediate(Smi::FromInt(1))); __ add(Operand(eax), Immediate(Smi::FromInt(1)));
} }
} }
@ -4031,14 +3956,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Handle<String> check) {
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
+                                                 Handle<String> check,
+                                                 Label* if_true,
+                                                 Label* if_false,
+                                                 Label* fall_through) {
{ AccumulatorValueContext context(this); { AccumulatorValueContext context(this);
VisitForTypeofValue(expr); VisitForTypeofValue(expr);
} }
@ -4077,11 +3998,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(not_zero, if_true, if_false, fall_through); Split(not_zero, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->function_symbol())) { } else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(eax, if_false); __ JumpIfSmi(eax, if_false);
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
-    __ j(equal, if_true);
-    __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
-    Split(equal, if_true, if_false, fall_through);
+    __ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx);
+    Split(above_equal, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) { } else if (check->Equals(isolate()->heap()->object_symbol())) {
__ JumpIfSmi(eax, if_false); __ JumpIfSmi(eax, if_false);
if (!FLAG_harmony_typeof) { if (!FLAG_harmony_typeof) {
@ -4099,7 +4017,18 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else { } else {
if (if_false != fall_through) __ jmp(if_false); if (if_false != fall_through) __ jmp(if_false);
} }
-  context()->Plug(if_true, if_false);
 }
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+                                                    Label* if_true,
+                                                    Label* if_false,
+                                                    Label* fall_through) {
+  VisitForAccumulatorValue(expr);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  __ cmp(eax, isolate()->factory()->undefined_value());
+  Split(equal, if_true, if_false, fall_through);
+}
@ -4107,12 +4036,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation"); Comment cmnt(masm_, "[ CompareOperation");
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr)) return;
// Always perform the comparison for its control flow. Pack the result // Always perform the comparison for its control flow. Pack the result
// into the expression's context after the comparison is performed. // into the expression's context after the comparison is performed.
Label materialize_true, materialize_false; Label materialize_true, materialize_false;
Label* if_true = NULL; Label* if_true = NULL;
Label* if_false = NULL; Label* if_false = NULL;
@ -4120,9 +4046,16 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->PrepareTest(&materialize_true, &materialize_false, context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through); &if_true, &if_false, &fall_through);
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
+    context()->Plug(if_true, if_false);
+    return;
+  }
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
-  switch (op) {
+  switch (expr->op()) {
case Token::IN: case Token::IN:
VisitForStackValue(expr->right()); VisitForStackValue(expr->right());
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION); __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
@ -4138,7 +4071,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ CallStub(&stub); __ CallStub(&stub);
decrement_stack_height(2); decrement_stack_height(2);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, eax); __ test(eax, Operand(eax));
// The stub returns 0 for true. // The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through); Split(zero, if_true, if_false, fall_through);
break; break;
@ -4147,8 +4080,11 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: { default: {
VisitForAccumulatorValue(expr->right()); VisitForAccumulatorValue(expr->right());
Condition cc = no_condition; Condition cc = no_condition;
+      bool strict = false;
       switch (op) {
         case Token::EQ_STRICT:
+          strict = true;
+          // Fall through
case Token::EQ: case Token::EQ:
cc = equal; cc = equal;
__ pop(edx); __ pop(edx);
@ -4184,10 +4120,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
JumpPatchSite patch_site(masm_); JumpPatchSite patch_site(masm_);
if (inline_smi_code) { if (inline_smi_code) {
Label slow_case; Label slow_case;
__ mov(ecx, edx); __ mov(ecx, Operand(edx));
__ or_(ecx, eax); __ or_(ecx, Operand(eax));
patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear); patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
__ cmp(edx, eax); __ cmp(edx, Operand(eax));
Split(cc, if_true, if_false, NULL); Split(cc, if_true, if_false, NULL);
__ bind(&slow_case); __ bind(&slow_case);
} }
@ -4199,7 +4135,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, eax); __ test(eax, Operand(eax));
Split(cc, if_true, if_false, fall_through); Split(cc, if_true, if_false, fall_through);
} }
} }
@ -4210,9 +4146,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
} }
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
-                                              Expression* sub_expr,
-                                              NilValue nil) {
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
Label materialize_true, materialize_false; Label materialize_true, materialize_false;
Label* if_true = NULL; Label* if_true = NULL;
Label* if_false = NULL; Label* if_false = NULL;
@ -4220,20 +4154,15 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
context()->PrepareTest(&materialize_true, &materialize_false, context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through); &if_true, &if_false, &fall_through);
-  VisitForAccumulatorValue(sub_expr);
+  VisitForAccumulatorValue(expr->expression());
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  Handle<Object> nil_value = nil == kNullValue ?
-      isolate()->factory()->null_value() :
-      isolate()->factory()->undefined_value();
-  __ cmp(eax, nil_value);
-  if (expr->op() == Token::EQ_STRICT) {
+  __ cmp(eax, isolate()->factory()->null_value());
+  if (expr->is_strict()) {
     Split(equal, if_true, if_false, fall_through);
   } else {
-    Handle<Object> other_nil_value = nil == kNullValue ?
-        isolate()->factory()->undefined_value() :
-        isolate()->factory()->null_value();
     __ j(equal, if_true);
-    __ cmp(eax, other_nil_value);
+    __ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, if_true); __ j(equal, if_true);
__ JumpIfSmi(eax, if_false); __ JumpIfSmi(eax, if_false);
// It can be an undetectable object. // It can be an undetectable object.
@ -4300,7 +4229,7 @@ void FullCodeGenerator::EnterFinallyBlock() {
// Cook return address on top of stack (smi encoded Code* delta) // Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx)); ASSERT(!result_register().is(edx));
__ pop(edx); __ pop(edx);
__ sub(edx, Immediate(masm_->CodeObject())); __ sub(Operand(edx), Immediate(masm_->CodeObject()));
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ SmiTag(edx); __ SmiTag(edx);
@ -4316,8 +4245,8 @@ void FullCodeGenerator::ExitFinallyBlock() {
// Uncook return address. // Uncook return address.
__ pop(edx); __ pop(edx);
__ SmiUntag(edx); __ SmiUntag(edx);
__ add(edx, Immediate(masm_->CodeObject())); __ add(Operand(edx), Immediate(masm_->CodeObject()));
__ jmp(edx); __ jmp(Operand(edx));
} }

160
deps/v8/src/ia32/ic-ia32.cc

@ -212,7 +212,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Update write barrier. Make sure not to clobber the value. // Update write barrier. Make sure not to clobber the value.
__ mov(r1, value); __ mov(r1, value);
__ RecordWrite(elements, r0, r1, kDontSaveFPRegs); __ RecordWrite(elements, r0, r1);
} }
@ -326,7 +326,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
// Fast case: Do the load. // Fast case: Do the load.
STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0)); STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize)); __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
__ cmp(scratch, Immediate(FACTORY->the_hole_value())); __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty // In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched. // to ensure the prototype chain is searched.
__ j(equal, out_of_range); __ j(equal, out_of_range);
@ -394,8 +394,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Check if element is in the range of mapped arguments. If not, jump // Check if element is in the range of mapped arguments. If not, jump
// to the unmapped lookup with the parameter map in scratch1. // to the unmapped lookup with the parameter map in scratch1.
__ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset)); __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
__ sub(scratch2, Immediate(Smi::FromInt(2))); __ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
__ cmp(key, scratch2); __ cmp(key, Operand(scratch2));
__ j(greater_equal, unmapped_case); __ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole. // Load element index and check whether it is the hole.
@ -432,7 +432,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK); __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset)); __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
__ cmp(key, scratch); __ cmp(key, Operand(scratch));
__ j(greater_equal, slow_case); __ j(greater_equal, slow_case);
return FieldOperand(backing_store, return FieldOperand(backing_store,
key, key,
@ -534,7 +534,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shr(ecx, KeyedLookupCache::kMapHashShift); __ shr(ecx, KeyedLookupCache::kMapHashShift);
__ mov(edi, FieldOperand(eax, String::kHashFieldOffset)); __ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
__ shr(edi, String::kHashShift); __ shr(edi, String::kHashShift);
__ xor_(ecx, edi); __ xor_(ecx, Operand(edi));
__ and_(ecx, KeyedLookupCache::kCapacityMask); __ and_(ecx, KeyedLookupCache::kCapacityMask);
// Load the key (consisting of map and symbol) from the cache and // Load the key (consisting of map and symbol) from the cache and
@ -545,7 +545,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shl(edi, kPointerSizeLog2 + 1); __ shl(edi, kPointerSizeLog2 + 1);
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys)); __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow); __ j(not_equal, &slow);
__ add(edi, Immediate(kPointerSize)); __ add(Operand(edi), Immediate(kPointerSize));
__ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys)); __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow); __ j(not_equal, &slow);
@ -559,12 +559,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi, __ mov(edi,
Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets)); Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset)); __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
__ sub(edi, ecx); __ sub(edi, Operand(ecx));
__ j(above_equal, &property_array_property); __ j(above_equal, &property_array_property);
// Load in-object property. // Load in-object property.
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset)); __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ add(ecx, edi); __ add(ecx, Operand(edi));
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0)); __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0); __ ret(0);
@ -651,8 +651,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// Check that it has indexed interceptor and access checks // Check that it has indexed interceptor and access checks
// are not enabled for this object. // are not enabled for this object.
__ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset)); __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
__ and_(ecx, Immediate(kSlowCaseBitFieldMask)); __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
__ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor)); __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
__ j(not_zero, &slow); __ j(not_zero, &slow);
// Everything is fine, call runtime. // Everything is fine, call runtime.
@ -710,7 +710,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(mapped_location, eax); __ mov(mapped_location, eax);
__ lea(ecx, mapped_location); __ lea(ecx, mapped_location);
__ mov(edx, eax); __ mov(edx, eax);
__ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs); __ RecordWrite(ebx, ecx, edx);
__ Ret(); __ Ret();
__ bind(&notin); __ bind(&notin);
// The unmapped lookup expects that the parameter map is in ebx. // The unmapped lookup expects that the parameter map is in ebx.
@ -719,7 +719,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(unmapped_location, eax); __ mov(unmapped_location, eax);
__ lea(edi, unmapped_location); __ lea(edi, unmapped_location);
__ mov(edx, eax); __ mov(edx, eax);
__ RecordWrite(ebx, edi, edx, kDontSaveFPRegs); __ RecordWrite(ebx, edi, edx);
__ Ret(); __ Ret();
__ bind(&slow); __ bind(&slow);
GenerateMiss(masm, false); GenerateMiss(masm, false);
@ -734,9 +734,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- edx : receiver // -- edx : receiver
// -- esp[0] : return address // -- esp[0] : return address
// ----------------------------------- // -----------------------------------
-  Label slow, fast_object_with_map_check, fast_object_without_map_check;
-  Label fast_double_with_map_check, fast_double_without_map_check;
-  Label check_if_double_array, array, extra;
+  Label slow, fast, array, extra;
// Check that the object isn't a smi. // Check that the object isn't a smi.
__ JumpIfSmi(edx, &slow); __ JumpIfSmi(edx, &slow);
@ -752,18 +750,22 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ CmpInstanceType(edi, JS_ARRAY_TYPE); __ CmpInstanceType(edi, JS_ARRAY_TYPE);
__ j(equal, &array); __ j(equal, &array);
// Check that the object is some kind of JSObject. // Check that the object is some kind of JSObject.
-  __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
+  __ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE);
   __ j(below, &slow);
+  __ CmpInstanceType(edi, JS_PROXY_TYPE);
+  __ j(equal, &slow);
+  __ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE);
+  __ j(equal, &slow);
// Object case: Check key against length in the elements array. // Object case: Check key against length in the elements array.
// eax: value // eax: value
// edx: JSObject // edx: JSObject
// ecx: key (a smi) // ecx: key (a smi)
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
-  __ j(below, &fast_object_with_map_check);
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  // Check that the object is in fast mode and writable.
+  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
+  __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+  __ j(below, &fast);
// Slow case: call runtime. // Slow case: call runtime.
__ bind(&slow); __ bind(&slow);
@ -776,28 +778,16 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// eax: value // eax: value
// edx: receiver, a JSArray // edx: receiver, a JSArray
// ecx: key, a smi. // ecx: key, a smi.
-  // ebx: receiver->elements, a FixedArray
-  // edi: receiver map
+  // edi: receiver->elements, a FixedArray
// flags: compare (ecx, edx.length()) // flags: compare (ecx, edx.length())
// do not leave holes in the array: // do not leave holes in the array:
__ j(not_equal, &slow); __ j(not_equal, &slow);
-  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+  __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
   __ j(above_equal, &slow);
-  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-  __ j(not_equal, &check_if_double_array);
-  // Add 1 to receiver->length, and go to common element store code for Objects.
-  __ add(FieldOperand(edx, JSArray::kLengthOffset),
-         Immediate(Smi::FromInt(1)));
-  __ jmp(&fast_object_without_map_check);
-  __ bind(&check_if_double_array);
-  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-  __ j(not_equal, &slow);
-  // Add 1 to receiver->length, and go to common element store code for doubles.
+  // Add 1 to receiver->length, and go to fast array write.
   __ add(FieldOperand(edx, JSArray::kLengthOffset),
          Immediate(Smi::FromInt(1)));
-  __ jmp(&fast_double_without_map_check);
+  __ jmp(&fast);
// Array case: Get the length and the elements array from the JS // Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it // array. Check that the array is in fast mode (and writable); if it
@ -806,54 +796,24 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// eax: value // eax: value
// edx: receiver, a JSArray // edx: receiver, a JSArray
// ecx: key, a smi. // ecx: key, a smi.
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  // Check the key against the length in the array and fall through to the
-  // common store code.
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
+  // Check the key against the length in the array, compute the
+  // address to store into and fall through to fast case.
   __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
   __ j(above_equal, &extra);
-  // Fast case: Do the store, could either Object or double.
-  __ bind(&fast_object_with_map_check);
+  // Fast case: Do the store.
+  __ bind(&fast);
// eax: value // eax: value
// ecx: key (a smi) // ecx: key (a smi)
// edx: receiver // edx: receiver
-  // ebx: FixedArray receiver->elements
-  // edi: receiver map
-  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-  __ j(not_equal, &fast_double_with_map_check);
-  __ bind(&fast_object_without_map_check);
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(eax, &non_smi_value);
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
-  __ ret(0);
-  __ bind(&non_smi_value);
-  // Escape to slow case when writing non-smi into smi-only array.
-  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
-  __ CheckFastObjectElements(edi, &slow, Label::kNear);
-  // Fast elements array, store the value to the elements backing store.
-  __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
+  // edi: FixedArray receiver->elements
+  __ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax);
   // Update write barrier for the elements array address.
-  __ mov(edx, eax);  // Preserve the value which is returned.
-  __ RecordWriteArray(
-      ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ ret(0);
-  __ bind(&fast_double_with_map_check);
-  // Check for fast double array case. If this fails, call through to the
-  // runtime.
-  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-  __ j(not_equal, &slow);
-  __ bind(&fast_double_without_map_check);
-  // If the value is a number, store it as a double in the FastDoubleElements
-  // array.
-  __ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0, &slow, false);
+  __ mov(edx, Operand(eax));
+  __ RecordWrite(edi, 0, edx, ecx);
__ ret(0); __ ret(0);
} }
@ -991,22 +951,22 @@ static void GenerateCallMiss(MacroAssembler* masm,
// Get the receiver of the function from the stack; 1 ~ return address. // Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  // Enter an internal frame.
+  __ EnterInternalFrame();
   // Push the receiver and the name of the function.
   __ push(edx);
   __ push(ecx);
   // Call the entry.
   CEntryStub stub(1);
   __ mov(eax, Immediate(2));
   __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
   __ CallStub(&stub);
   // Move result to edi and exit the internal frame.
   __ mov(edi, eax);
-  }
+  __ LeaveInternalFrame();
// Check if the receiver is a global object of some sort. // Check if the receiver is a global object of some sort.
// This can happen only for regular CallIC but not KeyedCallIC. // This can happen only for regular CallIC but not KeyedCallIC.
@ -1151,17 +1111,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// This branch is taken when calling KeyedCallIC_Miss is neither required // This branch is taken when calling KeyedCallIC_Miss is neither required
// nor beneficial. // nor beneficial.
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1); __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(ecx);  // save the key
-    __ push(edx);  // pass the receiver
-    __ push(ecx);  // pass the key
-    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-    __ pop(ecx);  // restore the key
-    // Leave the internal frame.
-  }
+  __ EnterInternalFrame();
+  __ push(ecx);  // save the key
+  __ push(edx);  // pass the receiver
+  __ push(ecx);  // pass the key
+  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+  __ pop(ecx);  // restore the key
+  __ LeaveInternalFrame();
__ mov(edi, eax); __ mov(edi, eax);
__ jmp(&do_call); __ jmp(&do_call);

255
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -70,17 +70,6 @@ bool LCodeGen::GenerateCode() {
ASSERT(is_unused()); ASSERT(is_unused());
status_ = GENERATING; status_ = GENERATING;
CpuFeatures::Scope scope(SSE2); CpuFeatures::Scope scope(SSE2);
-  CodeStub::GenerateFPStubs();
-  // Open a frame scope to indicate that there is a frame on the stack. The
-  // MANUAL indicates that the scope shouldn't actually generate code to set up
-  // the frame (that is done in GeneratePrologue).
-  FrameScope frame_scope(masm_, StackFrame::MANUAL);
-  dynamic_frame_alignment_ = chunk()->num_double_slots() > 2 ||
-                             info()->osr_ast_id() != AstNode::kNoNumber;
return GeneratePrologue() && return GeneratePrologue() &&
GenerateBody() && GenerateBody() &&
GenerateDeferredCode() && GenerateDeferredCode() &&
@ -155,29 +144,6 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok); __ bind(&ok);
} }
-  if (dynamic_frame_alignment_) {
-    Label do_not_pad, align_loop;
-    STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
-    // Align esp to a multiple of 2 * kPointerSize.
-    __ test(esp, Immediate(kPointerSize));
-    __ j(zero, &do_not_pad, Label::kNear);
-    __ push(Immediate(0));
-    __ mov(ebx, esp);
-    // Copy arguments, receiver, and return address.
-    __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-    __ bind(&align_loop);
-    __ mov(eax, Operand(ebx, 1 * kPointerSize));
-    __ mov(Operand(ebx, 0), eax);
-    __ add(Operand(ebx), Immediate(kPointerSize));
-    __ dec(ecx);
-    __ j(not_zero, &align_loop, Label::kNear);
-    __ mov(Operand(ebx, 0),
-           Immediate(isolate()->factory()->frame_alignment_marker()));
-    __ bind(&do_not_pad);
-  }
__ push(ebp); // Caller's frame pointer. __ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp); __ mov(ebp, esp);
__ push(esi); // Callee's context. __ push(esi); // Callee's context.
@ -238,12 +204,11 @@ bool LCodeGen::GeneratePrologue() {
// Store it in the context. // Store it in the context.
int context_offset = Context::SlotOffset(var->index()); int context_offset = Context::SlotOffset(var->index());
__ mov(Operand(esi, context_offset), eax); __ mov(Operand(esi, context_offset), eax);
-        // Update the write barrier. This clobbers eax and ebx.
-        __ RecordWriteContextSlot(esi,
-                                  context_offset,
-                                  eax,
-                                  ebx,
-                                  kDontSaveFPRegs);
+        // Update the write barrier. This clobbers all involved
+        // registers, so we have to use a third register to avoid
+        // clobbering esi.
+        __ mov(ecx, esi);
+        __ RecordWrite(ecx, context_offset, eax, ebx);
} }
} }
Comment(";;; End allocate local context"); Comment(";;; End allocate local context");
@ -295,9 +260,6 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i]; LDeferredCode* code = deferred_[i];
__ bind(code->entry()); __ bind(code->entry());
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate(); code->Generate();
__ jmp(code->exit()); __ jmp(code->exit());
} }
@ -519,18 +481,14 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc, int argc,
LInstruction* instr, LInstruction* instr,
LOperand* context) { LOperand* context) {
-  ASSERT(context->IsRegister() || context->IsStackSlot());
   if (context->IsRegister()) {
     if (!ToRegister(context).is(esi)) {
       __ mov(esi, ToRegister(context));
     }
-  } else if (context->IsStackSlot()) {
-    __ mov(esi, ToOperand(context));
-  } else if (context->IsConstantOperand()) {
-    Handle<Object> literal =
-        chunk_->LookupLiteral(LConstantOperand::cast(context));
-    LoadHeapObject(esi, Handle<Context>::cast(literal));
   } else {
-    UNREACHABLE();
+    // Context is stack slot.
+    __ mov(esi, ToOperand(context));
} }
__ CallRuntimeSaveDoubles(id); __ CallRuntimeSaveDoubles(id);
@ -711,7 +669,7 @@ void LCodeGen::RecordSafepoint(
int arguments, int arguments,
int deoptimization_index) { int deoptimization_index) {
ASSERT(kind == expected_safepoint_kind_); ASSERT(kind == expected_safepoint_kind_);
const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(), Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index); kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) { for (int i = 0; i < operands->length(); i++) {
@ -1242,13 +1200,8 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) { void LCodeGen::DoConstantT(LConstantT* instr) {
-  Register reg = ToRegister(instr->result());
-  Handle<Object> handle = instr->value();
-  if (handle->IsHeapObject()) {
-    LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
-  } else {
-    __ Set(reg, Immediate(handle));
-  }
+  ASSERT(instr->result()->IsRegister());
+  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
} }
@ -1624,33 +1577,23 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
} }
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
+void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
   Register reg = ToRegister(instr->InputAt(0));
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  // If the expression is known to be untagged or a smi, then it's definitely
-  // not null, and it can't be a an undetectable object.
-  if (instr->hydrogen()->representation().IsSpecialization() ||
-      instr->hydrogen()->type().IsSmi()) {
-    EmitGoto(false_block);
-    return;
-  }
+  // TODO(fsc): If the expression is known to be a smi, then it's
+  // definitely not null. Jump to the false block.
   int true_block = chunk_->LookupDestination(instr->true_block_id());
-  Handle<Object> nil_value = instr->nil() == kNullValue ?
-      factory()->null_value() :
-      factory()->undefined_value();
-  __ cmp(reg, nil_value);
-  if (instr->kind() == kStrictEquality) {
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  __ cmp(reg, factory()->null_value());
+  if (instr->is_strict()) {
     EmitBranch(true_block, false_block, equal);
   } else {
-    Handle<Object> other_nil_value = instr->nil() == kNullValue ?
-        factory()->undefined_value() :
-        factory()->null_value();
     Label* true_label = chunk_->GetAssemblyLabel(true_block);
     Label* false_label = chunk_->GetAssemblyLabel(false_block);
     __ j(equal, true_label);
-    __ cmp(reg, other_nil_value);
+    __ cmp(reg, factory()->undefined_value());
__ j(equal, true_label); __ j(equal, true_label);
__ JumpIfSmi(reg, false_label); __ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in // Check for undetectable objects by looking in the bit field in
@ -1802,36 +1745,28 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
ASSERT(!input.is(temp)); ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register. ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
__ JumpIfSmi(input, is_false); __ JumpIfSmi(input, is_false);
+  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+  __ j(below, is_false);
+  // Map is now in temp.
+  // Functions have class 'Function'.
+  __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    // Assuming the following assertions, we can use the same compares to test
-    // for both being a function type and being in the object type range.
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                  FIRST_SPEC_OBJECT_TYPE + 1);
-    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                  LAST_SPEC_OBJECT_TYPE - 1);
-    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
-    __ j(below, is_false);
-    __ j(equal, is_true);
-    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
-    __ j(equal, is_true);
+    __ j(above_equal, is_true);
   } else {
-    // Faster code path to avoid two compares: subtract lower bound from the
-    // actual type and do a signed compare with the width of the type range.
-    __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
-    __ mov(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
-    __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    __ cmpb(Operand(temp2),
-            static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
-                                FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    __ j(above, is_false);
+    __ j(above_equal, is_false);
   }
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   // Check if the constructor in the map is a function.
   __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'. // Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2); __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
if (class_name->IsEqualTo(CStrVector("Object"))) { if (class_name->IsEqualTo(CStrVector("Object"))) {
@ -1916,8 +1851,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
virtual void Generate() { virtual void Generate() {
codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
} }
virtual LInstruction* instr() { return instr_; }
Label* map_check() { return &map_check_; } Label* map_check() { return &map_check_; }
private: private:
LInstanceOfKnownGlobal* instr_; LInstanceOfKnownGlobal* instr_;
Label map_check_; Label map_check_;
@@ -2055,17 +1991,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
} }
__ mov(esp, ebp); __ mov(esp, ebp);
__ pop(ebp); __ pop(ebp);
if (dynamic_frame_alignment_) {
Label aligned;
// Frame alignment marker (padding) is below arguments,
// and receiver, so its return-address-relative offset is
// (num_arguments + 2) words.
__ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
Immediate(factory()->frame_alignment_marker()));
__ j(not_equal, &aligned);
__ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
__ bind(&aligned);
}
__ Ret((GetParameterCount() + 1) * kPointerSize, ecx); __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
} }
@@ -2073,7 +1998,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result()); Register result = ToRegister(instr->result());
__ mov(result, Operand::Cell(instr->hydrogen()->cell())); __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
if (instr->hydrogen()->RequiresHoleCheck()) { if (instr->hydrogen()->check_hole_value()) {
__ cmp(result, factory()->the_hole_value()); __ cmp(result, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment()); DeoptimizeIf(equal, instr->environment());
} }
@@ -2094,34 +2019,20 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register object = ToRegister(instr->TempAt(0));
Register address = ToRegister(instr->TempAt(1));
Register value = ToRegister(instr->InputAt(0)); Register value = ToRegister(instr->InputAt(0));
ASSERT(!value.is(object)); Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
int offset = JSGlobalPropertyCell::kValueOffset;
__ mov(object, Immediate(cell_handle));
// If the cell we are storing to contains the hole it could have // If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need // been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark // to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case. // it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->RequiresHoleCheck()) { if (instr->hydrogen()->check_hole_value()) {
__ cmp(FieldOperand(object, offset), factory()->the_hole_value()); __ cmp(cell_operand, factory()->the_hole_value());
DeoptimizeIf(equal, instr->environment()); DeoptimizeIf(equal, instr->environment());
} }
// Store the value. // Store the value.
__ mov(FieldOperand(object, offset), value); __ mov(cell_operand, value);
// Cells are always in the remembered set.
__ RecordWriteField(object,
offset,
value,
address,
kSaveFPRegs,
OMIT_REMEMBERED_SET);
} }
@@ -2152,7 +2063,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
if (instr->needs_write_barrier()) { if (instr->needs_write_barrier()) {
Register temp = ToRegister(instr->TempAt(0)); Register temp = ToRegister(instr->TempAt(0));
int offset = Context::SlotOffset(instr->slot_index()); int offset = Context::SlotOffset(instr->slot_index());
__ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs); __ RecordWrite(context, offset, value, temp);
} }
} }
@@ -2369,14 +2280,16 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) { LLoadKeyedFastDoubleElement* instr) {
XMMRegister result = ToDoubleRegister(instr->result()); XMMRegister result = ToDoubleRegister(instr->result());
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + if (instr->hydrogen()->RequiresHoleCheck()) {
sizeof(kHoleNanLower32); int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
Operand hole_check_operand = BuildFastArrayOperand( sizeof(kHoleNanLower32);
instr->elements(), instr->key(), Operand hole_check_operand = BuildFastArrayOperand(
FAST_DOUBLE_ELEMENTS, instr->elements(), instr->key(),
offset); FAST_DOUBLE_ELEMENTS,
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); offset);
DeoptimizeIf(equal, instr->environment()); __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr->environment());
}
Operand double_load_operand = BuildFastArrayOperand( Operand double_load_operand = BuildFastArrayOperand(
instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS, instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@@ -2446,7 +2359,6 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break; break;
case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
@@ -2768,7 +2680,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
virtual void Generate() { virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
} }
virtual LInstruction* instr() { return instr_; }
private: private:
LUnaryMathOperation* instr_; LUnaryMathOperation* instr_;
}; };
@@ -3094,7 +3005,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->result()).is(eax)); ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity(); int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Drop(1); __ Drop(1);
} }
@@ -3151,7 +3062,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) { if (instr->needs_write_barrier()) {
Register temp = ToRegister(instr->TempAt(0)); Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the object for in-object properties. // Update the write barrier for the object for in-object properties.
__ RecordWriteField(object, offset, value, temp, kSaveFPRegs); __ RecordWrite(object, offset, value, temp);
} }
} else { } else {
Register temp = ToRegister(instr->TempAt(0)); Register temp = ToRegister(instr->TempAt(0));
@@ -3160,7 +3071,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->needs_write_barrier()) { if (instr->needs_write_barrier()) {
// Update the write barrier for the properties array. // Update the write barrier for the properties array.
// object is used as a scratch register. // object is used as a scratch register.
__ RecordWriteField(temp, offset, value, object, kSaveFPRegs); __ RecordWrite(temp, offset, value, object);
} }
} }
} }
@@ -3219,7 +3130,6 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
break; break;
case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS: case DICTIONARY_ELEMENTS:
@@ -3236,13 +3146,6 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register elements = ToRegister(instr->object()); Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
// This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
// conversion, so it deopts in that case.
if (instr->hydrogen()->ValueNeedsSmiCheck()) {
__ test(value, Immediate(kSmiTagMask));
DeoptimizeIf(not_zero, instr->environment());
}
// Do the store. // Do the store.
if (instr->key()->IsConstantOperand()) { if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3265,7 +3168,7 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
key, key,
times_pointer_size, times_pointer_size,
FixedArray::kHeaderSize)); FixedArray::kHeaderSize));
__ RecordWrite(elements, key, value, kSaveFPRegs); __ RecordWrite(elements, key, value);
} }
} }
@@ -3309,7 +3212,6 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { } : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
virtual LInstruction* instr() { return instr_; }
private: private:
LStringCharCodeAt* instr_; LStringCharCodeAt* instr_;
}; };
@@ -3432,7 +3334,6 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { } : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
virtual LInstruction* instr() { return instr_; }
private: private:
LStringCharFromCode* instr_; LStringCharFromCode* instr_;
}; };
@@ -3512,7 +3413,6 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { } : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
virtual LInstruction* instr() { return instr_; }
private: private:
LNumberTagI* instr_; LNumberTagI* instr_;
}; };
@@ -3580,7 +3480,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { } : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
virtual LInstruction* instr() { return instr_; }
private: private:
LNumberTagD* instr_; LNumberTagD* instr_;
}; };
@@ -3682,6 +3581,16 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
} }
class DeferredTaggedToI: public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
private:
LTaggedToI* instr_;
};
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label done, heap_number; Label done, heap_number;
Register input_reg = ToRegister(instr->InputAt(0)); Register input_reg = ToRegister(instr->InputAt(0));
@@ -3763,16 +3672,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) { void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
class DeferredTaggedToI: public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LTaggedToI* instr_;
};
LOperand* input = instr->InputAt(0); LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister()); ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result())); ASSERT(input->Equals(instr->result()));
@@ -3983,16 +3882,9 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) { void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
Handle<JSFunction> target = instr->hydrogen()->target(); ASSERT(instr->InputAt(0)->IsRegister());
if (isolate()->heap()->InNewSpace(*target)) { Operand operand = ToOperand(instr->InputAt(0));
Register reg = ToRegister(instr->value()); __ cmp(operand, instr->hydrogen()->target());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(target);
__ cmp(reg, Operand::Cell(cell));
} else {
Operand operand = ToOperand(instr->value());
__ cmp(operand, instr->hydrogen()->target());
}
DeoptimizeIf(not_equal, instr->environment()); DeoptimizeIf(not_equal, instr->environment());
} }
@@ -4296,12 +4188,10 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = not_zero; final_branch_condition = not_zero;
} else if (type_name->Equals(heap()->function_symbol())) { } else if (type_name->Equals(heap()->function_symbol())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
__ JumpIfSmi(input, false_label); __ JumpIfSmi(input, false_label);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input); __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
__ j(equal, true_label); final_branch_condition = above_equal;
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
} else if (type_name->Equals(heap()->object_symbol())) { } else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label); __ JumpIfSmi(input, false_label);
@@ -4413,7 +4303,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { } : LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
virtual LInstruction* instr() { return instr_; }
private: private:
LStackCheck* instr_; LStackCheck* instr_;
}; };

13
deps/v8/src/ia32/lithium-codegen-ia32.h

@@ -58,7 +58,6 @@ class LCodeGen BASE_EMBEDDED {
inlined_function_count_(0), inlined_function_count_(0),
scope_(info->scope()), scope_(info->scope()),
status_(UNUSED), status_(UNUSED),
dynamic_frame_alignment_(false),
deferred_(8), deferred_(8),
osr_pc_offset_(-1), osr_pc_offset_(-1),
deoptimization_reloc_size(), deoptimization_reloc_size(),
@@ -134,10 +133,6 @@ class LCodeGen BASE_EMBEDDED {
int strict_mode_flag() const { int strict_mode_flag() const {
return info()->is_strict_mode() ? kStrictMode : kNonStrictMode; return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
} }
bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; }
void set_dynamic_frame_alignment(bool value) {
dynamic_frame_alignment_ = value;
}
LChunk* chunk() const { return chunk_; } LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; } Scope* scope() const { return scope_; }
@@ -302,7 +297,6 @@ class LCodeGen BASE_EMBEDDED {
int inlined_function_count_; int inlined_function_count_;
Scope* const scope_; Scope* const scope_;
Status status_; Status status_;
bool dynamic_frame_alignment_;
TranslationBuffer translations_; TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_; ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_; int osr_pc_offset_;
@@ -352,20 +346,16 @@ class LCodeGen BASE_EMBEDDED {
class LDeferredCode: public ZoneObject { class LDeferredCode: public ZoneObject {
public: public:
explicit LDeferredCode(LCodeGen* codegen) explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen), : codegen_(codegen), external_exit_(NULL) {
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this); codegen->AddDeferredCode(this);
} }
virtual ~LDeferredCode() { } virtual ~LDeferredCode() { }
virtual void Generate() = 0; virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; } void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; } Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected: protected:
LCodeGen* codegen() const { return codegen_; } LCodeGen* codegen() const { return codegen_; }
@@ -376,7 +366,6 @@ class LDeferredCode: public ZoneObject {
Label entry_; Label entry_;
Label exit_; Label exit_;
Label* external_exit_; Label* external_exit_;
int instruction_index_;
}; };
} } // namespace v8::internal } } // namespace v8::internal

64
deps/v8/src/ia32/lithium-ia32.cc

@@ -214,11 +214,10 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
} }
void LIsNilAndBranch::PrintDataTo(StringStream* stream) { void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if "); stream->Add("if ");
InputAt(0)->PrintTo(stream); InputAt(0)->PrintTo(stream);
stream->Add(kind() == kStrictEquality ? " === " : " == "); stream->Add(is_strict() ? " === null" : " == null");
stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
} }
@@ -352,11 +351,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
int LChunk::GetNextSpillIndex(bool is_double) { int LChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot if for a double-width slot. // Skip a slot if for a double-width slot.
if (is_double) { if (is_double) spill_slot_count_++;
spill_slot_count_ |= 1; // Make it odd, so incrementing makes it even.
spill_slot_count_++;
num_double_slots_++;
}
return spill_slot_count_++; return spill_slot_count_++;
} }
@@ -712,9 +707,7 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment(); HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0; instr->set_environment(CreateEnvironment(hydrogen_env));
instr->set_environment(CreateEnvironment(hydrogen_env,
&argument_index_accumulator));
return instr; return instr;
} }
@@ -1001,13 +994,10 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
} }
LEnvironment* LChunkBuilder::CreateEnvironment( LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
HEnvironment* hydrogen_env,
int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL; if (hydrogen_env == NULL) return NULL;
LEnvironment* outer = LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id(); int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber); ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length(); int value_count = hydrogen_env->length();
@@ -1017,6 +1007,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
argument_count_, argument_count_,
value_count, value_count,
outer); outer);
int argument_index = 0;
for (int i = 0; i < value_count; ++i) { for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue; if (hydrogen_env->is_special_index(i)) continue;
@@ -1025,7 +1016,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
if (value->IsArgumentsObject()) { if (value->IsArgumentsObject()) {
op = NULL; op = NULL;
} else if (value->IsPushArgument()) { } else if (value->IsPushArgument()) {
op = new LArgument((*argument_index_accumulator)++); op = new LArgument(argument_index++);
} else { } else {
op = UseAny(value); op = UseAny(value);
} }
@@ -1480,10 +1471,10 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
} }
LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) { LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
// We only need a temp register for non-strict compare. // We only need a temp register for non-strict compare.
LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister(); LOperand* temp = instr->is_strict() ? NULL : TempRegister();
return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp); return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
} }
@@ -1692,13 +1683,7 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) { LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
// If the target is in new space, we'll emit a global cell compare and so LOperand* value = UseAtStart(instr->value());
// want the value in a register. If the target gets promoted before we
// emit code, we will still get the register but will do an immediate
// compare instead of the cell compare. This is safe.
LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target())
? UseRegisterAtStart(instr->value())
: UseAtStart(instr->value());
return AssignEnvironment(new LCheckFunction(value)); return AssignEnvironment(new LCheckFunction(value));
} }
@@ -1785,7 +1770,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) { LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new LLoadGlobalCell; LLoadGlobalCell* result = new LLoadGlobalCell;
return instr->RequiresHoleCheck() return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result)) ? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result); : DefineAsRegister(result);
} }
@@ -1801,10 +1786,8 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LStoreGlobalCell* result = LStoreGlobalCell* result =
new LStoreGlobalCell(UseTempRegister(instr->value()), new LStoreGlobalCell(UseRegisterAtStart(instr->value()));
TempRegister(), return instr->check_hole_value() ? AssignEnvironment(result) : result;
TempRegister());
return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
} }
@@ -1825,13 +1808,15 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LOperand* context;
LOperand* value; LOperand* value;
LOperand* temp; LOperand* temp;
LOperand* context = UseRegister(instr->context());
if (instr->NeedsWriteBarrier()) { if (instr->NeedsWriteBarrier()) {
context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value()); value = UseTempRegister(instr->value());
temp = TempRegister(); temp = TempRegister();
} else { } else {
context = UseRegister(instr->context());
value = UseRegister(instr->value()); value = UseRegister(instr->value());
temp = NULL; temp = NULL;
} }
@@ -1959,7 +1944,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
ASSERT(instr->object()->representation().IsTagged()); ASSERT(instr->object()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32()); ASSERT(instr->key()->representation().IsInteger32());
LOperand* obj = UseRegister(instr->object()); LOperand* obj = UseTempRegister(instr->object());
LOperand* val = needs_write_barrier LOperand* val = needs_write_barrier
? UseTempRegister(instr->value()) ? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value()); : UseRegisterAtStart(instr->value());
@@ -2036,14 +2021,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier(); bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* obj; LOperand* obj = needs_write_barrier
if (needs_write_barrier) { ? UseTempRegister(instr->object())
obj = instr->is_in_object() : UseRegisterAtStart(instr->object());
? UseRegister(instr->object())
: UseTempRegister(instr->object());
} else {
obj = UseRegisterAtStart(instr->object());
}
LOperand* val = needs_write_barrier LOperand* val = needs_write_barrier
? UseTempRegister(instr->value()) ? UseTempRegister(instr->value())

28
deps/v8/src/ia32/lithium-ia32.h

@@ -101,7 +101,7 @@ class LCodeGen;
V(Integer32ToDouble) \ V(Integer32ToDouble) \
V(InvokeFunction) \ V(InvokeFunction) \
V(IsConstructCallAndBranch) \ V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \ V(IsNullAndBranch) \
V(IsObjectAndBranch) \ V(IsObjectAndBranch) \
V(IsSmiAndBranch) \ V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \ V(IsUndetectableAndBranch) \
@@ -615,18 +615,17 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
}; };
class LIsNilAndBranch: public LControlInstruction<1, 1> { class LIsNullAndBranch: public LControlInstruction<1, 1> {
public: public:
LIsNilAndBranch(LOperand* value, LOperand* temp) { LIsNullAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value; inputs_[0] = value;
temps_[0] = temp; temps_[0] = temp;
} }
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch") DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch) DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
EqualityKind kind() const { return hydrogen()->kind(); } bool is_strict() const { return hydrogen()->is_strict(); }
NilValue nil() const { return hydrogen()->nil(); }
virtual void PrintDataTo(StringStream* stream); virtual void PrintDataTo(StringStream* stream);
}; };
@@ -1231,12 +1230,10 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
}; };
class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> { class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
public: public:
explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) { explicit LStoreGlobalCell(LOperand* value) {
inputs_[0] = value; inputs_[0] = value;
temps_[0] = temp1;
temps_[1] = temp2;
} }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell") DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
@@ -1801,8 +1798,6 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value; inputs_[0] = value;
} }
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function") DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction) DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
}; };
@@ -2075,7 +2070,6 @@ class LChunk: public ZoneObject {
graph_(graph), graph_(graph),
instructions_(32), instructions_(32),
pointer_maps_(8), pointer_maps_(8),
num_double_slots_(0),
inlined_closures_(1) { } inlined_closures_(1) { }
void AddInstruction(LInstruction* instruction, HBasicBlock* block); void AddInstruction(LInstruction* instruction, HBasicBlock* block);
@@ -2089,8 +2083,6 @@ class LChunk: public ZoneObject {
int ParameterAt(int index); int ParameterAt(int index);
int GetParameterStackSlot(int index) const; int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; } int spill_slot_count() const { return spill_slot_count_; }
int num_double_slots() const { return num_double_slots_; }
CompilationInfo* info() const { return info_; } CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; } HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; } const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
@@ -2132,7 +2124,6 @@ class LChunk: public ZoneObject {
HGraph* const graph_; HGraph* const graph_;
ZoneList<LInstruction*> instructions_; ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_; ZoneList<LPointerMap*> pointer_maps_;
int num_double_slots_;
ZoneList<Handle<JSFunction> > inlined_closures_; ZoneList<Handle<JSFunction> > inlined_closures_;
}; };
@@ -2268,8 +2259,7 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* instr, int ast_id); LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment(); void ClearInstructionPendingDeoptimizationEnvironment();
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env, LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
int* argument_index_accumulator);
void VisitInstruction(HInstruction* current); void VisitInstruction(HInstruction* current);

737
deps/v8/src/ia32/macro-assembler-ia32.cc

File diff suppressed because it is too large

Some files were not shown because too many files changed in this diff
