
Upgrade V8 to 2.3.8

Ryan Dahl
commit 91757fa840
  1. deps/v8/AUTHORS (1)
  2. deps/v8/ChangeLog (42)
  3. deps/v8/SConstruct (3)
  4. deps/v8/include/v8-profiler.h (30)
  5. deps/v8/include/v8.h (10)
  6. deps/v8/src/SConscript (8)
  7. deps/v8/src/SConscript.orig (324)
  8. deps/v8/src/accessors.cc (2)
  9. deps/v8/src/api.cc (121)
  10. deps/v8/src/arm/assembler-arm-inl.h (23)
  11. deps/v8/src/arm/assembler-arm.cc (15)
  12. deps/v8/src/arm/assembler-arm.h (4)
  13. deps/v8/src/arm/builtins-arm.cc (25)
  14. deps/v8/src/arm/codegen-arm.cc (666)
  15. deps/v8/src/arm/codegen-arm.h (23)
  16. deps/v8/src/arm/debug-arm.cc (8)
  17. deps/v8/src/arm/disasm-arm.cc (8)
  18. deps/v8/src/arm/fast-codegen-arm.cc (241)
  19. deps/v8/src/arm/full-codegen-arm.cc (184)
  20. deps/v8/src/arm/macro-assembler-arm.cc (12)
  21. deps/v8/src/arm/macro-assembler-arm.h (3)
  22. deps/v8/src/arm/simulator-arm.cc (10)
  23. deps/v8/src/arm/stub-cache-arm.cc (32)
  24. deps/v8/src/assembler.h (1)
  25. deps/v8/src/bootstrapper.cc (18)
  26. deps/v8/src/builtins.h (2)
  27. deps/v8/src/checks.h (10)
  28. deps/v8/src/codegen.cc (15)
  29. deps/v8/src/codegen.h (24)
  30. deps/v8/src/compiler.cc (28)
  31. deps/v8/src/compiler.h (53)
  32. deps/v8/src/contexts.h (2)
  33. deps/v8/src/cpu-profiler.cc (11)
  34. deps/v8/src/cpu-profiler.h (2)
  35. deps/v8/src/debug.cc (33)
  36. deps/v8/src/debug.h (10)
  37. deps/v8/src/factory.cc (14)
  38. deps/v8/src/factory.h (4)
  39. deps/v8/src/fast-codegen.cc (746)
  40. deps/v8/src/fast-codegen.h (161)
  41. deps/v8/src/flag-definitions.h (3)
  42. deps/v8/src/full-codegen.cc (5)
  43. deps/v8/src/full-codegen.h (9)
  44. deps/v8/src/globals.h (25)
  45. deps/v8/src/handles-inl.h (2)
  46. deps/v8/src/handles.cc (35)
  47. deps/v8/src/handles.h (3)
  48. deps/v8/src/heap-profiler.cc (6)
  49. deps/v8/src/heap.cc (522)
  50. deps/v8/src/heap.h (6)
  51. deps/v8/src/ia32/assembler-ia32-inl.h (24)
  52. deps/v8/src/ia32/assembler-ia32.cc (15)
  53. deps/v8/src/ia32/assembler-ia32.h (1)
  54. deps/v8/src/ia32/builtins-ia32.cc (22)
  55. deps/v8/src/ia32/codegen-ia32.cc (722)
  56. deps/v8/src/ia32/codegen-ia32.h (33)
  57. deps/v8/src/ia32/debug-ia32.cc (44)
  58. deps/v8/src/ia32/disasm-ia32.cc (1)
  59. deps/v8/src/ia32/fast-codegen-ia32.cc (954)
  60. deps/v8/src/ia32/fast-codegen-ia32.h (155)
  61. deps/v8/src/ia32/full-codegen-ia32.cc (175)
  62. deps/v8/src/ia32/macro-assembler-ia32.cc (66)
  63. deps/v8/src/ia32/macro-assembler-ia32.h (33)
  64. deps/v8/src/ia32/stub-cache-ia32.cc (32)
  65. deps/v8/src/ia32/virtual-frame-ia32.h (16)
  66. deps/v8/src/list-inl.h (7)
  67. deps/v8/src/list.h (2)
  68. deps/v8/src/liveedit-debugger.js (7)
  69. deps/v8/src/liveedit.cc (42)
  70. deps/v8/src/mark-compact.cc (256)
  71. deps/v8/src/mark-compact.h (10)
  72. deps/v8/src/messages.js (12)
  73. deps/v8/src/mips/debug-mips.cc (9)
  74. deps/v8/src/mips/simulator-mips.cc (7)
  75. deps/v8/src/objects-inl.h (112)
  76. deps/v8/src/objects-visiting.cc (139)
  77. deps/v8/src/objects-visiting.h (382)
  78. deps/v8/src/objects.cc (178)
  79. deps/v8/src/objects.h (140)
  80. deps/v8/src/parser.cc (9)
  81. deps/v8/src/platform-linux.cc (5)
  82. deps/v8/src/platform-nullos.cc (6)
  83. deps/v8/src/platform-posix.cc (7)
  84. deps/v8/src/platform-win32.cc (5)
  85. deps/v8/src/platform.h (3)
  86. deps/v8/src/platform.h.orig (580)
  87. deps/v8/src/profile-generator-inl.h (18)
  88. deps/v8/src/profile-generator.cc (1234)
  89. deps/v8/src/profile-generator.h (491)
  90. deps/v8/src/property.h (6)
  91. deps/v8/src/runtime.cc (16)
  92. deps/v8/src/runtime.js (8)
  93. deps/v8/src/serialize.cc (8)
  94. deps/v8/src/serialize.h (2)
  95. deps/v8/src/stub-cache.cc (17)
  96. deps/v8/src/stub-cache.h (3)
  97. deps/v8/src/third_party/dtoa/dtoa.c (129)
  98. deps/v8/src/top.cc (12)
  99. deps/v8/src/type-info.h (16)
  100. deps/v8/src/utils.h (6)

deps/v8/AUTHORS (1)

@@ -29,4 +29,5 @@ Rodolph Perfetta <rodolph.perfetta@arm.com>
 Ryan Dahl <coldredlemur@gmail.com>
 Subrato K De <subratokde@codeaurora.org>
 Burcu Dogan <burcujdogan@gmail.com>
+Vlad Burlik <vladbph@gmail.com>

deps/v8/ChangeLog (42)

@@ -1,11 +1,45 @@
+2010-08-16: Version 2.3.8
+
+        Fixed build with strict aliasing on GCC 4.4 (issue 463).
+
+        Fixed issue with incorrect handling of custom valueOf methods on
+        string wrappers (issue 760).
+
+        Fixed compilation for ARMv4 (issue 590).
+
+        Improved performance.
+
+
+2010-08-11: Version 2.3.7
+
+        Reduced size of heap snapshots produced by heap profiler (issue 783).
+
+        Introduced v8::Value::IsRegExp method.
+
+        Fixed CPU profiler crash in start / stop sequence when non-existent
+        name is passed (issue http://crbug.com/51594).
+
+        Introduced new indexed property query callbacks API (issue 816). This
+        API is guarded by USE_NEW_QUERY_CALLBACK define and is disabled
+        by default.
+
+        Removed support for object literal get/set with number/string
+        property name.
+
+        Fixed handling of JSObject::elements in CalculateNetworkSize
+        (issue 822).
+
+        Allow compiling with strict aliasing enabled on GCC 4.4 (issue 463).
+
+
 2010-08-09: Version 2.3.6
 
         RegExp literals create a new object every time they are evaluated
         (issue 704).
 
         Object.seal and Object.freeze return the modified object (issue 809).
 
         Fix building using GCC 4.4.4.
 
 
 2010-08-04: Version 2.3.5

deps/v8/SConstruct (3)

@@ -58,7 +58,7 @@ else:
   # on linux we need these compiler flags to avoid crashes in the v8 test suite
   # and avoid dtoa.c strict aliasing issues
   if os.environ.get('GCC_VERSION') == '44':
-    GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp', '-fno-strict-aliasing']
+    GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp']
     GCC_DTOA_EXTRA_CCFLAGS = []
   else:
     GCC_EXTRA_CCFLAGS = []
@@ -80,7 +80,6 @@ ANDROID_FLAGS = ['-march=armv7-a',
                  '-frerun-cse-after-loop',
                  '-frename-registers',
                  '-fomit-frame-pointer',
-                 '-fno-strict-aliasing',
                  '-finline-limit=64',
                  '-DCAN_USE_VFP_INSTRUCTIONS=1',
                  '-DCAN_USE_ARMV7_INSTRUCTIONS=1',

deps/v8/include/v8-profiler.h (30)

@@ -194,10 +194,10 @@ class HeapGraphNode;
 class V8EXPORT HeapGraphEdge {
  public:
   enum Type {
-    CONTEXT_VARIABLE = 0,  // A variable from a function context.
-    ELEMENT = 1,           // An element of an array.
-    PROPERTY = 2,          // A named object property.
-    INTERNAL = 3           // A link that can't be accessed from JS,
+    kContextVariable = 0,  // A variable from a function context.
+    kElement = 1,          // An element of an array.
+    kProperty = 2,         // A named object property.
+    kInternal = 3          // A link that can't be accessed from JS,
                            // thus, its name isn't a real property name.
   };
@@ -240,12 +240,12 @@ class V8EXPORT HeapGraphPath {
 class V8EXPORT HeapGraphNode {
  public:
   enum Type {
-    INTERNAL = 0,  // Internal node, a virtual one, for housekeeping.
-    ARRAY = 1,     // An array of elements.
-    STRING = 2,    // A string.
-    OBJECT = 3,    // A JS object (except for arrays and strings).
-    CODE = 4,      // Compiled code.
-    CLOSURE = 5    // Function closure.
+    kInternal = 0,  // Internal node, a virtual one, for housekeeping.
+    kArray = 1,     // An array of elements.
+    kString = 2,    // A string.
+    kObject = 3,    // A JS object (except for arrays and strings).
+    kCode = 4,      // Compiled code.
+    kClosure = 5    // Function closure.
   };

   /** Returns node type (see HeapGraphNode::Type). */
@@ -268,13 +268,15 @@ class V8EXPORT HeapGraphNode {
   int GetSelfSize() const;

   /** Returns node's network (self + reachable nodes) size, in bytes. */
-  int GetTotalSize() const;
+  int GetReachableSize() const;

   /**
-   * Returns node's private size, in bytes. That is, the size of memory
-   * that will be reclaimed having this node collected.
+   * Returns node's retained size, in bytes. That is, self + sizes of
+   * the objects that are reachable only from this object. In other
+   * words, the size of memory that will be reclaimed having this node
+   * collected.
    */
-  int GetPrivateSize() const;
+  int GetRetainedSize() const;

   /** Returns child nodes count of the node. */
   int GetChildrenCount() const;
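The renames above (GetTotalSize to GetReachableSize, GetPrivateSize to GetRetainedSize) change call sites in embedders that walk heap snapshots. As a rough illustration only, not part of this commit, a call site against the 2.3.8 profiler API could look like the sketch below; the helper name DumpDirectChildren is made up, and it assumes a node obtained from HeapSnapshot::GetRoot() with <v8.h>, <v8-profiler.h> and <cstdio> included.

// Hypothetical helper: print size information for a node's direct children.
void DumpDirectChildren(const v8::HeapGraphNode* node) {
  v8::HandleScope scope;
  for (int i = 0; i < node->GetChildrenCount(); i++) {
    const v8::HeapGraphNode* child = node->GetChild(i)->GetToNode();
    v8::String::AsciiValue name(child->GetName());
    printf("%s: self=%d reachable=%d retained=%d\n",
           *name,
           child->GetSelfSize(),
           child->GetReachableSize(),   // renamed from GetTotalSize()
           child->GetRetainedSize());   // renamed from GetPrivateSize()
  }
}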

deps/v8/include/v8.h (10)

@@ -919,6 +919,11 @@ class Value : public Data {
    */
   V8EXPORT bool IsDate() const;

+  /**
+   * Returns true if this value is a RegExp.
+   */
+  V8EXPORT bool IsRegExp() const;
+
   V8EXPORT Local<Boolean> ToBoolean() const;
   V8EXPORT Local<Number> ToNumber() const;
   V8EXPORT Local<String> ToString() const;
@@ -1819,9 +1824,9 @@ typedef Handle<Value> (*IndexedPropertySetter)(uint32_t index,

 /**
  * Returns a non-empty handle if the interceptor intercepts the request.
- * The result is true if the property exists and false otherwise.
+ * The result is an integer encoding property attributes.
  */
-typedef Handle<Boolean> (*IndexedPropertyQuery)(uint32_t index,
+typedef Handle<Integer> (*IndexedPropertyQuery)(uint32_t index,
                                                 const AccessorInfo& info);

 /**
@@ -2140,6 +2145,7 @@ class V8EXPORT ObjectTemplate : public Template {
       IndexedPropertyDeleter deleter = 0,
       IndexedPropertyEnumerator enumerator = 0,
       Handle<Value> data = Handle<Value>());
+
   /**
    * Sets the callback to be used when calling instances created from
    * this template as a function. If no callback is set, instances
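Two embedder-visible changes land in this header: Value::IsRegExp() is new, and the indexed property query callback now returns Handle<Integer> (encoding v8::PropertyAttribute bits) instead of Handle<Boolean>; the 2.3.7 ChangeLog entry above notes the new query API was initially guarded by the USE_NEW_QUERY_CALLBACK define. A hedged sketch of what call sites could look like follows; CheckValue and MyIndexedQuery are illustrative names, not part of this diff.

#include <v8.h>
using namespace v8;

// Uses the predicate added in this upgrade.
bool CheckValue(Handle<Value> value) {
  return value->IsRegExp();
}

// Indexed query interceptor under the new signature: report the property's
// attributes as an integer, or return an empty handle to decline.
Handle<Integer> MyIndexedQuery(uint32_t index, const AccessorInfo& info) {
  if (index < 16) {
    return Integer::New(ReadOnly | DontDelete);  // property exists, read-only
  }
  return Handle<Integer>();  // not intercepted
}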

deps/v8/src/SConscript (8)

@@ -84,6 +84,7 @@ SOURCES = {
     mark-compact.cc
     messages.cc
     objects.cc
+    objects-visiting.cc
     oprofile-agent.cc
     parser.cc
     profile-generator.cc
@@ -117,7 +118,6 @@ SOURCES = {
     zone.cc
     """),
   'arch:arm': Split("""
-    fast-codegen.cc
     jump-target-light.cc
     virtual-frame-light.cc
     arm/builtins-arm.cc
@@ -126,7 +126,6 @@ SOURCES = {
     arm/cpu-arm.cc
     arm/debug-arm.cc
    arm/disasm-arm.cc
-    arm/fast-codegen-arm.cc
     arm/frames-arm.cc
     arm/full-codegen-arm.cc
     arm/ic-arm.cc
@@ -139,7 +138,6 @@ SOURCES = {
     arm/assembler-arm.cc
     """),
   'arch:mips': Split("""
-    fast-codegen.cc
     mips/assembler-mips.cc
     mips/builtins-mips.cc
     mips/codegen-mips.cc
@@ -147,7 +145,6 @@ SOURCES = {
     mips/cpu-mips.cc
     mips/debug-mips.cc
     mips/disasm-mips.cc
-    mips/fast-codegen-mips.cc
     mips/full-codegen-mips.cc
     mips/frames-mips.cc
     mips/ic-mips.cc
@@ -166,7 +163,6 @@ SOURCES = {
     ia32/cpu-ia32.cc
     ia32/debug-ia32.cc
     ia32/disasm-ia32.cc
-    ia32/fast-codegen-ia32.cc
     ia32/frames-ia32.cc
     ia32/full-codegen-ia32.cc
     ia32/ic-ia32.cc
@@ -178,7 +174,6 @@ SOURCES = {
     ia32/virtual-frame-ia32.cc
     """),
   'arch:x64': Split("""
-    fast-codegen.cc
     jump-target-heavy.cc
     virtual-frame-heavy.cc
     x64/assembler-x64.cc
@@ -187,7 +182,6 @@ SOURCES = {
     x64/cpu-x64.cc
     x64/debug-x64.cc
     x64/disasm-x64.cc
-    x64/fast-codegen-x64.cc
     x64/frames-x64.cc
     x64/full-codegen-x64.cc
     x64/ic-x64.cc

deps/v8/src/SConscript.orig (324)

@@ -0,0 +1,324 @@
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from os.path import join, dirname, abspath
root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.append(join(root_dir, 'tools'))
import js2c
Import('context')
SOURCES = {
'all': Split("""
accessors.cc
allocation.cc
api.cc
assembler.cc
ast.cc
bootstrapper.cc
builtins.cc
checks.cc
circular-queue.cc
code-stubs.cc
codegen.cc
compilation-cache.cc
compiler.cc
contexts.cc
conversions.cc
counters.cc
cpu-profiler.cc
data-flow.cc
dateparser.cc
debug-agent.cc
debug.cc
disassembler.cc
diy-fp.cc
dtoa.cc
execution.cc
factory.cc
flags.cc
flow-graph.cc
frame-element.cc
frames.cc
full-codegen.cc
func-name-inferrer.cc
global-handles.cc
fast-dtoa.cc
fixed-dtoa.cc
handles.cc
hashmap.cc
heap-profiler.cc
heap.cc
ic.cc
interpreter-irregexp.cc
jsregexp.cc
jump-target.cc
liveedit.cc
log-utils.cc
log.cc
mark-compact.cc
messages.cc
objects.cc
objects-visiting.cc
oprofile-agent.cc
parser.cc
profile-generator.cc
property.cc
regexp-macro-assembler-irregexp.cc
regexp-macro-assembler.cc
regexp-stack.cc
register-allocator.cc
rewriter.cc
runtime.cc
scanner.cc
scopeinfo.cc
scopes.cc
serialize.cc
snapshot-common.cc
spaces.cc
string-stream.cc
stub-cache.cc
token.cc
top.cc
type-info.cc
unicode.cc
utils.cc
v8-counters.cc
v8.cc
v8threads.cc
variables.cc
version.cc
virtual-frame.cc
vm-state.cc
zone.cc
"""),
'arch:arm': Split("""
jump-target-light.cc
virtual-frame-light.cc
arm/builtins-arm.cc
arm/codegen-arm.cc
arm/constants-arm.cc
arm/cpu-arm.cc
arm/debug-arm.cc
arm/disasm-arm.cc
arm/frames-arm.cc
arm/full-codegen-arm.cc
arm/ic-arm.cc
arm/jump-target-arm.cc
arm/macro-assembler-arm.cc
arm/regexp-macro-assembler-arm.cc
arm/register-allocator-arm.cc
arm/stub-cache-arm.cc
arm/virtual-frame-arm.cc
arm/assembler-arm.cc
"""),
'arch:mips': Split("""
mips/assembler-mips.cc
mips/builtins-mips.cc
mips/codegen-mips.cc
mips/constants-mips.cc
mips/cpu-mips.cc
mips/debug-mips.cc
mips/disasm-mips.cc
mips/full-codegen-mips.cc
mips/frames-mips.cc
mips/ic-mips.cc
mips/jump-target-mips.cc
mips/macro-assembler-mips.cc
mips/register-allocator-mips.cc
mips/stub-cache-mips.cc
mips/virtual-frame-mips.cc
"""),
'arch:ia32': Split("""
jump-target-heavy.cc
virtual-frame-heavy.cc
ia32/assembler-ia32.cc
ia32/builtins-ia32.cc
ia32/codegen-ia32.cc
ia32/cpu-ia32.cc
ia32/debug-ia32.cc
ia32/disasm-ia32.cc
ia32/frames-ia32.cc
ia32/full-codegen-ia32.cc
ia32/ic-ia32.cc
ia32/jump-target-ia32.cc
ia32/macro-assembler-ia32.cc
ia32/regexp-macro-assembler-ia32.cc
ia32/register-allocator-ia32.cc
ia32/stub-cache-ia32.cc
ia32/virtual-frame-ia32.cc
"""),
'arch:x64': Split("""
jump-target-heavy.cc
virtual-frame-heavy.cc
x64/assembler-x64.cc
x64/builtins-x64.cc
x64/codegen-x64.cc
x64/cpu-x64.cc
x64/debug-x64.cc
x64/disasm-x64.cc
x64/frames-x64.cc
x64/full-codegen-x64.cc
x64/ic-x64.cc
x64/jump-target-x64.cc
x64/macro-assembler-x64.cc
x64/regexp-macro-assembler-x64.cc
x64/register-allocator-x64.cc
x64/stub-cache-x64.cc
x64/virtual-frame-x64.cc
"""),
'simulator:arm': ['arm/simulator-arm.cc'],
'simulator:mips': ['mips/simulator-mips.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
'os:android': ['platform-linux.cc', 'platform-posix.cc'],
'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
'os:nullos': ['platform-nullos.cc'],
'os:win32': ['platform-win32.cc'],
'mode:release': [],
'mode:debug': [
'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc'
]
}
D8_FILES = {
'all': [
'd8.cc', 'd8-debug.cc'
],
'os:linux': [
'd8-posix.cc'
],
'os:macos': [
'd8-posix.cc'
],
'os:android': [
'd8-posix.cc'
],
'os:freebsd': [
'd8-posix.cc'
],
'os:openbsd': [
'd8-posix.cc'
],
'os:solaris': [
'd8-posix.cc'
],
'os:win32': [
'd8-windows.cc'
],
'os:nullos': [
'd8-windows.cc' # Empty implementation at the moment.
],
'console:readline': [
'd8-readline.cc'
]
}
LIBRARY_FILES = '''
runtime.js
v8natives.js
array.js
string.js
uri.js
math.js
messages.js
apinatives.js
date.js
regexp.js
json.js
liveedit-debugger.js
mirror-debugger.js
debug-debugger.js
'''.split()
def Abort(message):
print message
sys.exit(1)
def ConfigureObjectFiles():
env = Environment()
env.Replace(**context.flags['v8'])
context.ApplyEnvOverrides(env)
env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
# Build the standard platform-independent source files.
source_files = context.GetRelevantSources(SOURCES)
d8_files = context.GetRelevantSources(D8_FILES)
d8_js = env.JS2C('d8-js.cc', 'd8.js', TYPE='D8')
d8_js_obj = context.ConfigureObject(env, d8_js, CPPPATH=['.'])
d8_objs = [context.ConfigureObject(env, [d8_files]), d8_js_obj]
# Combine the JavaScript library files into a single C++ file and
# compile it.
library_files = [s for s in LIBRARY_FILES]
library_files.append('macros.py')
libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries-empty.cc'], library_files, TYPE='CORE')
libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.'])
# Build dtoa.
dtoa_env = env.Copy()
dtoa_env.Replace(**context.flags['dtoa'])
dtoa_files = ['dtoa-config.c']
dtoa_obj = context.ConfigureObject(dtoa_env, dtoa_files)
source_objs = context.ConfigureObject(env, source_files)
non_snapshot_files = [dtoa_obj, source_objs]
# Create snapshot if necessary. For cross compilation you should either
# do without snapshots and take the performance hit or you should build a
# host VM with the simulator=arm and snapshot=on options and then take the
# resulting snapshot.cc file from obj/release and put it in the src
# directory. Then rebuild the VM with the cross compiler and specify
# snapshot=nobuild on the scons command line.
empty_snapshot_obj = context.ConfigureObject(env, 'snapshot-empty.cc')
mksnapshot_env = env.Copy()
mksnapshot_env.Replace(**context.flags['mksnapshot'])
mksnapshot_src = 'mksnapshot.cc'
mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
if context.use_snapshot:
if context.build_snapshot:
snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
else:
snapshot_cc = 'snapshot.cc'
snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
else:
snapshot_obj = empty_snapshot_obj
library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]
return (library_objs, d8_objs, [mksnapshot])
(library_objs, d8_objs, mksnapshot) = ConfigureObjectFiles()
Return('library_objs d8_objs mksnapshot')

deps/v8/src/accessors.cc (2)

@@ -488,7 +488,7 @@ Object* Accessors::FunctionGetLength(Object* object, void*) {
   JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
   if (!found_it) return Smi::FromInt(0);
   // Check if already compiled.
-  if (!function->is_compiled()) {
+  if (!function->shared()->is_compiled()) {
     // If the function isn't compiled yet, the length is not computed
     // correctly yet. Compile it now and return the right length.
     HandleScope scope;

deps/v8/src/api.cc (121)

@@ -174,6 +174,8 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
   heap_stats.objects_per_type = objects_per_type;
   int size_per_type[LAST_TYPE + 1] = {0};
   heap_stats.size_per_type = size_per_type;
+  int os_error;
+  heap_stats.os_error = &os_error;
   int end_marker;
   heap_stats.end_marker = &end_marker;
   i::Heap::RecordStats(&heap_stats, take_snapshot);
@@ -1792,6 +1794,13 @@ bool Value::IsDate() const {
 }
 
 
+bool Value::IsRegExp() const {
+  if (IsDeadCheck("v8::Value::IsRegExp()")) return false;
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  return obj->IsJSRegExp();
+}
+
+
 Local<String> Value::ToString() const {
   if (IsDeadCheck("v8::Value::ToString()")) return Local<String>();
   LOG_API("ToString");
@@ -4491,24 +4500,27 @@ const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
} }
static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
return const_cast<i::HeapGraphEdge*>(
reinterpret_cast<const i::HeapGraphEdge*>(edge));
}
HeapGraphEdge::Type HeapGraphEdge::GetType() const { HeapGraphEdge::Type HeapGraphEdge::GetType() const {
IsDeadCheck("v8::HeapGraphEdge::GetType"); IsDeadCheck("v8::HeapGraphEdge::GetType");
return static_cast<HeapGraphEdge::Type>( return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type());
reinterpret_cast<const i::HeapGraphEdge*>(this)->type());
} }
Handle<Value> HeapGraphEdge::GetName() const { Handle<Value> HeapGraphEdge::GetName() const {
IsDeadCheck("v8::HeapGraphEdge::GetName"); IsDeadCheck("v8::HeapGraphEdge::GetName");
const i::HeapGraphEdge* edge = i::HeapGraphEdge* edge = ToInternal(this);
reinterpret_cast<const i::HeapGraphEdge*>(this);
switch (edge->type()) { switch (edge->type()) {
case i::HeapGraphEdge::CONTEXT_VARIABLE: case i::HeapGraphEdge::kContextVariable:
case i::HeapGraphEdge::INTERNAL: case i::HeapGraphEdge::kInternal:
case i::HeapGraphEdge::PROPERTY: case i::HeapGraphEdge::kProperty:
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol( return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
edge->name()))); edge->name())));
case i::HeapGraphEdge::ELEMENT: case i::HeapGraphEdge::kElement:
return Handle<Number>(ToApi<Number>(i::Factory::NewNumberFromInt( return Handle<Number>(ToApi<Number>(i::Factory::NewNumberFromInt(
edge->index()))); edge->index())));
default: UNREACHABLE(); default: UNREACHABLE();
@@ -4519,28 +4531,32 @@ Handle<Value> HeapGraphEdge::GetName() const {
const HeapGraphNode* HeapGraphEdge::GetFromNode() const { const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
IsDeadCheck("v8::HeapGraphEdge::GetFromNode"); IsDeadCheck("v8::HeapGraphEdge::GetFromNode");
const i::HeapEntry* from = const i::HeapEntry* from = ToInternal(this)->From();
reinterpret_cast<const i::HeapGraphEdge*>(this)->from();
return reinterpret_cast<const HeapGraphNode*>(from); return reinterpret_cast<const HeapGraphNode*>(from);
} }
const HeapGraphNode* HeapGraphEdge::GetToNode() const { const HeapGraphNode* HeapGraphEdge::GetToNode() const {
IsDeadCheck("v8::HeapGraphEdge::GetToNode"); IsDeadCheck("v8::HeapGraphEdge::GetToNode");
const i::HeapEntry* to = const i::HeapEntry* to = ToInternal(this)->to();
reinterpret_cast<const i::HeapGraphEdge*>(this)->to();
return reinterpret_cast<const HeapGraphNode*>(to); return reinterpret_cast<const HeapGraphNode*>(to);
} }
static i::HeapGraphPath* ToInternal(const HeapGraphPath* path) {
return const_cast<i::HeapGraphPath*>(
reinterpret_cast<const i::HeapGraphPath*>(path));
}
int HeapGraphPath::GetEdgesCount() const { int HeapGraphPath::GetEdgesCount() const {
return reinterpret_cast<const i::HeapGraphPath*>(this)->path()->length(); return ToInternal(this)->path()->length();
} }
const HeapGraphEdge* HeapGraphPath::GetEdge(int index) const { const HeapGraphEdge* HeapGraphPath::GetEdge(int index) const {
return reinterpret_cast<const HeapGraphEdge*>( return reinterpret_cast<const HeapGraphEdge*>(
reinterpret_cast<const i::HeapGraphPath*>(this)->path()->at(index)); ToInternal(this)->path()->at(index));
} }
@@ -4555,137 +4571,136 @@ const HeapGraphNode* HeapGraphPath::GetToNode() const {
} }
static i::HeapEntry* ToInternal(const HeapGraphNode* entry) {
return const_cast<i::HeapEntry*>(
reinterpret_cast<const i::HeapEntry*>(entry));
}
HeapGraphNode::Type HeapGraphNode::GetType() const { HeapGraphNode::Type HeapGraphNode::GetType() const {
IsDeadCheck("v8::HeapGraphNode::GetType"); IsDeadCheck("v8::HeapGraphNode::GetType");
return static_cast<HeapGraphNode::Type>( return static_cast<HeapGraphNode::Type>(ToInternal(this)->type());
reinterpret_cast<const i::HeapEntry*>(this)->type());
} }
Handle<String> HeapGraphNode::GetName() const { Handle<String> HeapGraphNode::GetName() const {
IsDeadCheck("v8::HeapGraphNode::GetName"); IsDeadCheck("v8::HeapGraphNode::GetName");
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol( return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
reinterpret_cast<const i::HeapEntry*>(this)->name()))); ToInternal(this)->name())));
} }
uint64_t HeapGraphNode::GetId() const { uint64_t HeapGraphNode::GetId() const {
IsDeadCheck("v8::HeapGraphNode::GetId"); IsDeadCheck("v8::HeapGraphNode::GetId");
return reinterpret_cast<const i::HeapEntry*>(this)->id(); return ToInternal(this)->id();
} }
int HeapGraphNode::GetSelfSize() const { int HeapGraphNode::GetSelfSize() const {
IsDeadCheck("v8::HeapGraphNode::GetSelfSize"); IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
return reinterpret_cast<const i::HeapEntry*>(this)->self_size(); return ToInternal(this)->self_size();
} }
int HeapGraphNode::GetTotalSize() const { int HeapGraphNode::GetReachableSize() const {
IsDeadCheck("v8::HeapSnapshot::GetHead"); IsDeadCheck("v8::HeapSnapshot::GetReachableSize");
return const_cast<i::HeapEntry*>( return ToInternal(this)->ReachableSize();
reinterpret_cast<const i::HeapEntry*>(this))->TotalSize();
} }
int HeapGraphNode::GetPrivateSize() const { int HeapGraphNode::GetRetainedSize() const {
IsDeadCheck("v8::HeapSnapshot::GetPrivateSize"); IsDeadCheck("v8::HeapSnapshot::GetRetainedSize");
return const_cast<i::HeapEntry*>( return ToInternal(this)->RetainedSize();
reinterpret_cast<const i::HeapEntry*>(this))->NonSharedTotalSize();
} }
int HeapGraphNode::GetChildrenCount() const { int HeapGraphNode::GetChildrenCount() const {
IsDeadCheck("v8::HeapSnapshot::GetChildrenCount"); IsDeadCheck("v8::HeapSnapshot::GetChildrenCount");
return reinterpret_cast<const i::HeapEntry*>(this)->children()->length(); return ToInternal(this)->children().length();
} }
const HeapGraphEdge* HeapGraphNode::GetChild(int index) const { const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
IsDeadCheck("v8::HeapSnapshot::GetChild"); IsDeadCheck("v8::HeapSnapshot::GetChild");
return reinterpret_cast<const HeapGraphEdge*>( return reinterpret_cast<const HeapGraphEdge*>(
reinterpret_cast<const i::HeapEntry*>(this)->children()->at(index)); &ToInternal(this)->children()[index]);
} }
int HeapGraphNode::GetRetainersCount() const { int HeapGraphNode::GetRetainersCount() const {
IsDeadCheck("v8::HeapSnapshot::GetRetainersCount"); IsDeadCheck("v8::HeapSnapshot::GetRetainersCount");
return reinterpret_cast<const i::HeapEntry*>(this)->retainers()->length(); return ToInternal(this)->retainers().length();
} }
const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const { const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
IsDeadCheck("v8::HeapSnapshot::GetRetainer"); IsDeadCheck("v8::HeapSnapshot::GetRetainer");
return reinterpret_cast<const HeapGraphEdge*>( return reinterpret_cast<const HeapGraphEdge*>(
reinterpret_cast<const i::HeapEntry*>(this)->retainers()->at(index)); ToInternal(this)->retainers()[index]);
} }
int HeapGraphNode::GetRetainingPathsCount() const { int HeapGraphNode::GetRetainingPathsCount() const {
IsDeadCheck("v8::HeapSnapshot::GetRetainingPathsCount"); IsDeadCheck("v8::HeapSnapshot::GetRetainingPathsCount");
return const_cast<i::HeapEntry*>( return ToInternal(this)->GetRetainingPaths()->length();
reinterpret_cast<const i::HeapEntry*>(
this))->GetRetainingPaths()->length();
} }
const HeapGraphPath* HeapGraphNode::GetRetainingPath(int index) const { const HeapGraphPath* HeapGraphNode::GetRetainingPath(int index) const {
IsDeadCheck("v8::HeapSnapshot::GetRetainingPath"); IsDeadCheck("v8::HeapSnapshot::GetRetainingPath");
return reinterpret_cast<const HeapGraphPath*>( return reinterpret_cast<const HeapGraphPath*>(
const_cast<i::HeapEntry*>( ToInternal(this)->GetRetainingPaths()->at(index));
reinterpret_cast<const i::HeapEntry*>(
this))->GetRetainingPaths()->at(index));
} }
const HeapGraphNode* HeapSnapshotsDiff::GetAdditionsRoot() const { const HeapGraphNode* HeapSnapshotsDiff::GetAdditionsRoot() const {
IsDeadCheck("v8::HeapSnapshotsDiff::GetAdditionsRoot"); IsDeadCheck("v8::HeapSnapshotsDiff::GetAdditionsRoot");
const i::HeapSnapshotsDiff* diff = i::HeapSnapshotsDiff* diff =
reinterpret_cast<const i::HeapSnapshotsDiff*>(this); const_cast<i::HeapSnapshotsDiff*>(
reinterpret_cast<const i::HeapSnapshotsDiff*>(this));
return reinterpret_cast<const HeapGraphNode*>(diff->additions_root()); return reinterpret_cast<const HeapGraphNode*>(diff->additions_root());
} }
const HeapGraphNode* HeapSnapshotsDiff::GetDeletionsRoot() const { const HeapGraphNode* HeapSnapshotsDiff::GetDeletionsRoot() const {
IsDeadCheck("v8::HeapSnapshotsDiff::GetDeletionsRoot"); IsDeadCheck("v8::HeapSnapshotsDiff::GetDeletionsRoot");
const i::HeapSnapshotsDiff* diff = i::HeapSnapshotsDiff* diff =
reinterpret_cast<const i::HeapSnapshotsDiff*>(this); const_cast<i::HeapSnapshotsDiff*>(
reinterpret_cast<const i::HeapSnapshotsDiff*>(this));
return reinterpret_cast<const HeapGraphNode*>(diff->deletions_root()); return reinterpret_cast<const HeapGraphNode*>(diff->deletions_root());
} }
static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
return const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(snapshot));
}
unsigned HeapSnapshot::GetUid() const { unsigned HeapSnapshot::GetUid() const {
IsDeadCheck("v8::HeapSnapshot::GetUid"); IsDeadCheck("v8::HeapSnapshot::GetUid");
return reinterpret_cast<const i::HeapSnapshot*>(this)->uid(); return ToInternal(this)->uid();
} }
Handle<String> HeapSnapshot::GetTitle() const { Handle<String> HeapSnapshot::GetTitle() const {
IsDeadCheck("v8::HeapSnapshot::GetTitle"); IsDeadCheck("v8::HeapSnapshot::GetTitle");
const i::HeapSnapshot* snapshot =
reinterpret_cast<const i::HeapSnapshot*>(this);
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol( return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
snapshot->title()))); ToInternal(this)->title())));
} }
const HeapGraphNode* HeapSnapshot::GetRoot() const { const HeapGraphNode* HeapSnapshot::GetRoot() const {
IsDeadCheck("v8::HeapSnapshot::GetHead"); IsDeadCheck("v8::HeapSnapshot::GetHead");
const i::HeapSnapshot* snapshot = return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
reinterpret_cast<const i::HeapSnapshot*>(this);
return reinterpret_cast<const HeapGraphNode*>(snapshot->const_root());
} }
const HeapSnapshotsDiff* HeapSnapshot::CompareWith( const HeapSnapshotsDiff* HeapSnapshot::CompareWith(
const HeapSnapshot* snapshot) const { const HeapSnapshot* snapshot) const {
IsDeadCheck("v8::HeapSnapshot::CompareWith"); IsDeadCheck("v8::HeapSnapshot::CompareWith");
i::HeapSnapshot* snapshot1 = const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(this));
i::HeapSnapshot* snapshot2 = const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(snapshot));
return reinterpret_cast<const HeapSnapshotsDiff*>( return reinterpret_cast<const HeapSnapshotsDiff*>(
snapshot1->CompareWith(snapshot2)); ToInternal(this)->CompareWith(ToInternal(snapshot)));
} }

deps/v8/src/arm/assembler-arm-inl.h (23)

@@ -190,6 +190,29 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
 }
 
 
+template<typename StaticVisitor>
+void RelocInfo::Visit() {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    StaticVisitor::VisitPointer(target_object_address());
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    StaticVisitor::VisitCodeTarget(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    StaticVisitor::VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  } else if (Debug::has_break_points() &&
+             ((RelocInfo::IsJSReturn(mode) &&
+               IsPatchedReturnSequence()) ||
+              (RelocInfo::IsDebugBreakSlot(mode) &&
+               IsPatchedDebugBreakSlotSequence()))) {
+    StaticVisitor::VisitDebugTarget(this);
+#endif
+  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+    StaticVisitor::VisitRuntimeEntry(this);
+  }
+}
+
+
 Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
   rm_ = no_reg;
   imm32_ = immediate;

deps/v8/src/arm/assembler-arm.cc (15)

@@ -2276,6 +2276,21 @@ void Assembler::vcmp(const DwVfpRegister src1,
 }
 
 
+void Assembler::vcmp(const DwVfpRegister src1,
+                     const double src2,
+                     const SBit s,
+                     const Condition cond) {
+  // vcmp(Dd, Dm) double precision floating point comparison.
+  // Instruction details available in ARM DDI 0406A, A8-570.
+  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
+  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | 0000(3-0)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(src2 == 0.0);
+  emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
+       src1.code()*B12 | 0x5*B9 | B8 | B6);
+}
+
+
 void Assembler::vmrs(Register dst, Condition cond) {
   // Instruction details available in ARM DDI 0406A, A8-652.
   // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
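The new overload compares a double register against an immediate 0.0, so callers do not have to materialize a zero constant in a second VFP register first. A hypothetical macro-assembler fragment is sketched below; the helper name and register choices are illustrative, not taken from this diff, and it assumes the usual v8::internal headers, a VFP3-capable target, and the ACCESS_MASM macro.

#define __ ACCESS_MASM(masm)

// Branch to if_zero when the double in 'value' compares equal to 0.0.
static void EmitJumpIfZero(MacroAssembler* masm,
                           DwVfpRegister value,
                           Label* if_zero) {
  __ vcmp(value, 0.0);  // new overload: compare against immediate 0.0
  __ vmrs(pc);          // copy the VFP status flags into the APSR
  __ b(eq, if_zero);
}

#undef __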

deps/v8/src/arm/assembler-arm.h (4)

@@ -1031,6 +1031,10 @@ class Assembler : public Malloced {
             const DwVfpRegister src2,
             const SBit s = LeaveCC,
             const Condition cond = al);
+  void vcmp(const DwVfpRegister src1,
+            const double src2,
+            const SBit s = LeaveCC,
+            const Condition cond = al);
   void vmrs(const Register dst,
             const Condition cond = al);
   void vsqrt(const DwVfpRegister dst,

deps/v8/src/arm/builtins-arm.cc (25)

@@ -911,6 +911,29 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
 }
 
 
+void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Preserve the function.
+  __ push(r1);
+
+  // Push the function on the stack as the argument to the runtime function.
+  __ push(r1);
+  __ CallRuntime(Runtime::kLazyCompile, 1);
+  // Calculate the entry point.
+  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // Restore saved function.
+  __ pop(r1);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ Jump(r2);
+}
+
+
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 1. Make sure we have at least one argument.
   // r0: actual number of arguments
@@ -1050,7 +1073,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     __ ldr(r2,
            FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
     __ mov(r2, Operand(r2, ASR, kSmiTagSize));
-    __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+    __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeOffset));
     __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
     __ cmp(r2, r0);  // Check formal and actual parameter counts.
     __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),

deps/v8/src/arm/codegen-arm.cc (666)

File diff suppressed because it is too large

deps/v8/src/arm/codegen-arm.h (23)

@@ -286,6 +286,10 @@ class CodeGenerator: public AstVisitor {
     return inlined_write_barrier_size_ + 4;
   }

+  static MemOperand ContextOperand(Register context, int index) {
+    return MemOperand(context, Context::SlotOffset(index));
+  }
+
  private:
   // Construction/Destruction
   explicit CodeGenerator(MacroAssembler* masm);
@@ -338,10 +342,6 @@ class CodeGenerator: public AstVisitor {
   void LoadReference(Reference* ref);
   void UnloadReference(Reference* ref);

-  static MemOperand ContextOperand(Register context, int index) {
-    return MemOperand(context, Context::SlotOffset(index));
-  }
-
   MemOperand SlotOperand(Slot* slot, Register tmp);

   MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
@@ -482,6 +482,8 @@ class CodeGenerator: public AstVisitor {
   void GenerateIsSpecObject(ZoneList<Expression*>* args);
   void GenerateIsFunction(ZoneList<Expression*>* args);
   void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
+  void GenerateIsStringWrapperSafeForDefaultValueOf(
+      ZoneList<Expression*>* args);

   // Support for construct call checks.
   void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -623,6 +625,19 @@ class TranscendentalCacheStub: public CodeStub {
 };


+class ToBooleanStub: public CodeStub {
+ public:
+  explicit ToBooleanStub(Register tos) : tos_(tos) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Register tos_;
+  Major MajorKey() { return ToBoolean; }
+  int MinorKey() { return tos_.code(); }
+};
+
+
 class GenericBinaryOpStub : public CodeStub {
  public:
  GenericBinaryOpStub(Token::Value op,

deps/v8/src/arm/debug-arm.cc (8)

@@ -293,15 +293,11 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
   masm->Abort("LiveEdit frame dropping is not supported on arm");
 }

+const bool Debug::kFrameDropperSupported = false;
+
 #undef __

-Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
-                                       Handle<Code> code) {
-  UNREACHABLE();
-  return NULL;
-}
-const int Debug::kFrameDropperFrameSize = -1;

 #endif  // ENABLE_DEBUGGER_SUPPORT

deps/v8/src/arm/disasm-arm.cc (8)

@@ -1188,7 +1188,13 @@ void Decoder::DecodeVCMP(Instr* instr) {
   bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);

   if (dp_operation && !raise_exception_for_qnan) {
-    Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+    if (instr->Opc2Field() == 0x4) {
+      Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+    } else if (instr->Opc2Field() == 0x5) {
+      Format(instr, "vcmp.f64'cond 'Dd, #0.0");
+    } else {
+      Unknown(instr);  // invalid
+    }
   } else {
     Unknown(instr);  // Not used by V8.
   }

deps/v8/src/arm/fast-codegen-arm.cc (241)

@@ -1,241 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen-inl.h"
#include "fast-codegen.h"
#include "scopes.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm())
Register FastCodeGenerator::accumulator0() { return r0; }
Register FastCodeGenerator::accumulator1() { return r1; }
Register FastCodeGenerator::scratch0() { return r3; }
Register FastCodeGenerator::scratch1() { return r4; }
Register FastCodeGenerator::scratch2() { return r5; }
Register FastCodeGenerator::receiver_reg() { return r2; }
Register FastCodeGenerator::context_reg() { return cp; }
void FastCodeGenerator::EmitLoadReceiver() {
// Offset 2 is due to return address and saved frame pointer.
int index = 2 + scope()->num_parameters();
__ ldr(receiver_reg(), MemOperand(sp, index * kPointerSize));
}
void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
ASSERT(!destination().is(no_reg));
ASSERT(cell->IsJSGlobalPropertyCell());
__ mov(destination(), Operand(cell));
__ ldr(destination(),
FieldMemOperand(destination(), JSGlobalPropertyCell::kValueOffset));
if (FLAG_debug_code) {
__ mov(ip, Operand(Factory::the_hole_value()));
__ cmp(destination(), ip);
__ Check(ne, "DontDelete cells can't contain the hole");
}
// The loaded value is not known to be a smi.
clear_as_smi(destination());
}
void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
LookupResult lookup;
info()->receiver()->Lookup(*name, &lookup);
ASSERT(lookup.holder() == *info()->receiver());
ASSERT(lookup.type() == FIELD);
Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
int index = lookup.GetFieldIndex() - map->inobject_properties();
int offset = index * kPointerSize;
// We will emit the write barrier unless the stored value is statically
// known to be a smi.
bool needs_write_barrier = !is_smi(accumulator0());
// Negative offsets are inobject properties.
if (offset < 0) {
offset += map->instance_size();
__ str(accumulator0(), FieldMemOperand(receiver_reg(), offset));
if (needs_write_barrier) {
// Preserve receiver from write barrier.
__ mov(scratch0(), receiver_reg());
}
} else {
offset += FixedArray::kHeaderSize;
__ ldr(scratch0(),
FieldMemOperand(receiver_reg(), JSObject::kPropertiesOffset));
__ str(accumulator0(), FieldMemOperand(scratch0(), offset));
}
if (needs_write_barrier) {
__ RecordWrite(scratch0(), Operand(offset), scratch1(), scratch2());
}
if (destination().is(accumulator1())) {
__ mov(accumulator1(), accumulator0());
if (is_smi(accumulator0())) {
set_as_smi(accumulator1());
} else {
clear_as_smi(accumulator1());
}
}
}
void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
ASSERT(!destination().is(no_reg));
LookupResult lookup;
info()->receiver()->Lookup(*name, &lookup);
ASSERT(lookup.holder() == *info()->receiver());
ASSERT(lookup.type() == FIELD);
Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
int index = lookup.GetFieldIndex() - map->inobject_properties();
int offset = index * kPointerSize;
// Perform the load. Negative offsets are inobject properties.
if (offset < 0) {
offset += map->instance_size();
__ ldr(destination(), FieldMemOperand(receiver_reg(), offset));
} else {
offset += FixedArray::kHeaderSize;
__ ldr(scratch0(),
FieldMemOperand(receiver_reg(), JSObject::kPropertiesOffset));
__ ldr(destination(), FieldMemOperand(scratch0(), offset));
}
// The loaded value is not known to be a smi.
clear_as_smi(destination());
}
void FastCodeGenerator::EmitBitOr() {
if (is_smi(accumulator0()) && is_smi(accumulator1())) {
// If both operands are known to be a smi then there is no need to check
// the operands or result. There is no need to perform the operation in
// an effect context.
if (!destination().is(no_reg)) {
__ orr(destination(), accumulator1(), Operand(accumulator0()));
}
} else {
// Left is in accumulator1, right in accumulator0.
if (destination().is(accumulator0())) {
__ mov(scratch0(), accumulator0());
__ orr(destination(), accumulator1(), Operand(accumulator1()));
Label* bailout =
info()->AddBailout(accumulator1(), scratch0()); // Left, right.
__ BranchOnNotSmi(destination(), bailout);
} else if (destination().is(accumulator1())) {
__ mov(scratch0(), accumulator1());
__ orr(destination(), accumulator1(), Operand(accumulator0()));
Label* bailout = info()->AddBailout(scratch0(), accumulator0());
__ BranchOnNotSmi(destination(), bailout);
} else {
ASSERT(destination().is(no_reg));
__ orr(scratch0(), accumulator1(), Operand(accumulator0()));
Label* bailout = info()->AddBailout(accumulator1(), accumulator0());
__ BranchOnNotSmi(scratch0(), bailout);
}
}
// If we didn't bailout, the result (in fact, both inputs too) is known to
// be a smi.
set_as_smi(accumulator0());
set_as_smi(accumulator1());
}
void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
ASSERT(info_ == NULL);
info_ = compilation_info;
Comment cmnt(masm_, "[ function compiled by fast code generator");
// Save the caller's frame pointer and set up our own.
Comment prologue_cmnt(masm(), ";; Prologue");
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
__ add(fp, sp, Operand(2 * kPointerSize));
// Note that we keep a live register reference to cp (context) at
// this point.
Label* bailout_to_beginning = info()->AddBailout();
// Receiver (this) is allocated to a fixed register.
if (info()->has_this_properties()) {
Comment cmnt(masm(), ";; MapCheck(this)");
if (FLAG_print_ir) {
PrintF("MapCheck(this)\n");
}
ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
Handle<Map> map(object->map());
EmitLoadReceiver();
__ CheckMap(receiver_reg(), scratch0(), map, bailout_to_beginning, false);
}
// If there is a global variable access check if the global object is the
// same as at lazy-compilation time.
if (info()->has_globals()) {
Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
if (FLAG_print_ir) {
PrintF("MapCheck(GLOBAL)\n");
}
ASSERT(info()->has_global_object());
Handle<Map> map(info()->global_object()->map());
__ ldr(scratch0(), CodeGenerator::GlobalObject());
__ CheckMap(scratch0(), scratch1(), map, bailout_to_beginning, true);
}
VisitStatements(function()->body());
Comment return_cmnt(masm(), ";; Return(<undefined>)");
if (FLAG_print_ir) {
PrintF("Return(<undefined>)\n");
}
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
__ add(sp, sp, Operand(sp_delta));
__ Jump(lr);
}
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

deps/v8/src/arm/full-codegen-arm.cc (184)

@@ -55,99 +55,97 @@ namespace internal {
// //
// The function builds a JS frame. Please see JavaScriptFrameConstants in // The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-arm.h for its layout. // frames-arm.h for its layout.
void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) { void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL); ASSERT(info_ == NULL);
info_ = info; info_ = info;
SetFunctionPosition(function()); SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator"); Comment cmnt(masm_, "[ function compiled by full code generator");
if (mode == PRIMARY) { int locals_count = scope()->num_stack_slots();
int locals_count = scope()->num_stack_slots();
__ Push(lr, fp, cp, r1); __ Push(lr, fp, cp, r1);
if (locals_count > 0) { if (locals_count > 0) {
// Load undefined value here, so the value is ready for the loop // Load undefined value here, so the value is ready for the loop
// below. // below.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
} }
// Adjust fp to point to caller's fp. // Adjust fp to point to caller's fp.
__ add(fp, sp, Operand(2 * kPointerSize)); __ add(fp, sp, Operand(2 * kPointerSize));
{ Comment cmnt(masm_, "[ Allocate locals"); { Comment cmnt(masm_, "[ Allocate locals");
for (int i = 0; i < locals_count; i++) { for (int i = 0; i < locals_count; i++) {
__ push(ip); __ push(ip);
}
} }
}
bool function_in_register = true; bool function_in_register = true;
// Possibly allocate a local context. // Possibly allocate a local context.
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) { if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context"); Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in r1. // Argument to NewContext is the function, which is in r1.
__ push(r1); __ push(r1);
if (heap_slots <= FastNewContextStub::kMaximumSlots) { if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots); FastNewContextStub stub(heap_slots);
__ CallStub(&stub); __ CallStub(&stub);
} else { } else {
__ CallRuntime(Runtime::kNewContext, 1); __ CallRuntime(Runtime::kNewContext, 1);
} }
function_in_register = false; function_in_register = false;
// Context is returned in both r0 and cp. It replaces the context // Context is returned in both r0 and cp. It replaces the context
// passed to us. It's saved in the stack and kept live in cp. // passed to us. It's saved in the stack and kept live in cp.
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context. // Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters(); int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) { for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->slot(); Slot* slot = scope()->parameter(i)->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) { if (slot != NULL && slot->type() == Slot::CONTEXT) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset + int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize; (num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack. // Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset)); __ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context. // Store it in the context.
__ mov(r1, Operand(Context::SlotOffset(slot->index()))); __ mov(r1, Operand(Context::SlotOffset(slot->index())));
__ str(r0, MemOperand(cp, r1)); __ str(r0, MemOperand(cp, r1));
// Update the write barrier. This clobbers all involved // Update the write barrier. This clobbers all involved
// registers, so we have to use two more registers to avoid // registers, so we have to use two more registers to avoid
// clobbering cp. // clobbering cp.
__ mov(r2, Operand(cp)); __ mov(r2, Operand(cp));
__ RecordWrite(r2, Operand(r1), r3, r0); __ RecordWrite(r2, Operand(r1), r3, r0);
}
} }
} }
}
Variable* arguments = scope()->arguments()->AsVariable(); Variable* arguments = scope()->arguments()->AsVariable();
if (arguments != NULL) { if (arguments != NULL) {
// Function uses arguments object. // Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object"); Comment cmnt(masm_, "[ Allocate arguments object");
if (!function_in_register) { if (!function_in_register) {
// Load this again, if it's used by the local context below. // Load this again, if it's used by the local context below.
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
} else { } else {
__ mov(r3, r1); __ mov(r3, r1);
}
// Receiver is just before the parameters on the caller's stack.
int offset = scope()->num_parameters() * kPointerSize;
__ add(r2, fp,
Operand(StandardFrameConstants::kCallerSPOffset + offset));
__ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
__ Push(r3, r2, r1);
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiever and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
__ CallStub(&stub);
// Duplicate the value; move-to-slot operation might clobber registers.
__ mov(r3, r0);
Move(arguments->slot(), r0, r1, r2);
Slot* dot_arguments_slot =
scope()->arguments_shadow()->AsVariable()->slot();
Move(dot_arguments_slot, r3, r1, r2);
} }
{ Comment cmnt(masm_, "[ Declarations"); { Comment cmnt(masm_, "[ Declarations");
@ -956,15 +954,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(r4, Operand(r2)); __ cmp(r4, Operand(r2));
__ b(eq, &update_each); __ b(eq, &update_each);
// Convert the entry to a string or null if it isn't a property // Convert the entry to a string or (smi) 0 if it isn't a property
// anymore. If the property has been removed while iterating, we // any more. If the property has been removed while iterating, we
// just skip it. // just skip it.
__ push(r1); // Enumerable. __ push(r1); // Enumerable.
__ push(r3); // Current entry. __ push(r3); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS); __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS);
__ mov(r3, Operand(r0)); __ mov(r3, Operand(r0), SetCC);
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(r3, ip);
__ b(eq, loop_statement.continue_target()); __ b(eq, loop_statement.continue_target());
// Update the 'each' property or variable from the possibly filtered // Update the 'each' property or variable from the possibly filtered
@ -1959,6 +1955,26 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
} }
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForValue(args->at(0), kAccumulator);
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
// Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
// used in a few functions in runtime.js which should not normally be hit by
// this compiler.
__ jmp(if_false);
Apply(context_, if_true, if_false);
}
void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) { void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1); ASSERT(args->length() == 1);
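The prologue hunk above stores context-allocated parameters with RecordWrite, and the comment there warns that the write barrier clobbers every register involved, which is why cp is copied into r2 first. As a rough idea of what a generational write barrier has to track, here is a minimal self-contained sketch; ToyHeap, RecordWrite and the set names are invented for illustration and are not V8's heap code:

#include <unordered_set>

// Toy two-generation heap: when a new-space pointer is stored into an
// old-space object, the slot is remembered so the next scavenge can
// find and update the reference without scanning all of old space.
struct ToyHeap {
  std::unordered_set<const void*> new_space_objects;
  std::unordered_set<void**> remembered_slots;

  void RecordWrite(void** slot, void* value) {
    *slot = value;
    if (new_space_objects.count(value)) {
      remembered_slots.insert(slot);  // old object now points into new space
    }
  }
};

int main() {
  ToyHeap heap;
  int young_object = 0;
  heap.new_space_objects.insert(&young_object);
  void* slot_in_old_object = nullptr;
  heap.RecordWrite(&slot_in_old_object, &young_object);
  return heap.remembered_slots.size() == 1 ? 0 : 1;  // slot was remembered
}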

12
deps/v8/src/arm/macro-assembler-arm.cc

@ -757,7 +757,7 @@ void MacroAssembler::InvokeFunction(Register fun,
SharedFunctionInfo::kFormalParameterCountOffset)); SharedFunctionInfo::kFormalParameterCountOffset));
mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize)); mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
ldr(code_reg, ldr(code_reg,
MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag)); MemOperand(r1, JSFunction::kCodeOffset - kHeapObjectTag));
add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
ParameterCount expected(expected_reg); ParameterCount expected(expected_reg);
@ -1508,8 +1508,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
// Make sure the code objects in the builtins object and in the // Make sure the code objects in the builtins object and in the
// builtin function are the same. // builtin function are the same.
push(r1); push(r1);
ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); ldr(r1, FieldMemOperand(r1, JSFunction::kCodeOffset));
ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCodeOffset));
cmp(r1, target); cmp(r1, target);
Assert(eq, "Builtin code object changed"); Assert(eq, "Builtin code object changed");
pop(r1); pop(r1);
@ -1656,6 +1655,13 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
} }
void MacroAssembler::AbortIfSmi(Register object) {
ASSERT_EQ(0, kSmiTag);
tst(object, Operand(kSmiTagMask));
Assert(ne, "Operand is a smi");
}
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first, Register first,
Register second, Register second,

3
deps/v8/src/arm/macro-assembler-arm.h

@ -618,6 +618,9 @@ class MacroAssembler: public Assembler {
// Jump if either of the registers contain a smi. // Jump if either of the registers contain a smi.
void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi); void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
// Abort execution if argument is a smi. Used in debug code.
void AbortIfSmi(Register object);
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// String utilities // String utilities

10
deps/v8/src/arm/simulator-arm.cc

@ -2431,11 +2431,17 @@ void Simulator::DecodeVCMP(Instr* instr) {
} }
int d = GlueRegCode(!dp_operation, instr->VdField(), instr->DField()); int d = GlueRegCode(!dp_operation, instr->VdField(), instr->DField());
int m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField()); int m = 0;
if (instr->Opc2Field() == 0x4) {
m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField());
}
if (dp_operation) { if (dp_operation) {
double dd_value = get_double_from_d_register(d); double dd_value = get_double_from_d_register(d);
double dm_value = get_double_from_d_register(m); double dm_value = 0.0;
if (instr->Opc2Field() == 0x4) {
dm_value = get_double_from_d_register(m);
}
Compute_FPSCR_Flags(dd_value, dm_value); Compute_FPSCR_Flags(dd_value, dm_value);
} else { } else {
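The simulator fix above makes DecodeVCMP read the Dm register only for the register form of the comparison (Opc2Field == 0x4) and otherwise compare against 0.0, matching the vcmp-with-zero encoding. A simplified stand-alone analogue of that dispatch, with invented names and only the double-precision path:

#include <cmath>
#include <cstdio>

struct FpscrFlags { bool n, z, c, v; };

// ARM-style flag settings for unordered / less-than / equal / greater-than.
FpscrFlags CompareDoubles(double lhs, double rhs) {
  FpscrFlags f = {false, false, false, false};
  if (std::isnan(lhs) || std::isnan(rhs)) { f.c = true; f.v = true; }
  else if (lhs == rhs)                    { f.z = true; f.c = true; }
  else if (lhs < rhs)                     { f.n = true; }
  else                                    { f.c = true; }
  return f;
}

// opc2 == 0x4 is the "compare with register" form; any other encoding in
// this toy model is treated as "compare with +0.0".
FpscrFlags SimulateVcmp(int opc2, double dd, double dm_register_value) {
  double dm = (opc2 == 0x4) ? dm_register_value : 0.0;
  return CompareDoubles(dd, dm);
}

int main() {
  FpscrFlags f = SimulateVcmp(0x5, -1.5, 99.0);  // zero form ignores Dm
  std::printf("n=%d z=%d c=%d v=%d\n", f.n, f.z, f.c, f.v);
}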

32
deps/v8/src/arm/stub-cache-arm.cc

@ -1212,38 +1212,6 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
} }
Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
// ----------- S t a t e -------------
// -- r1: function
// -- lr: return address
// -----------------------------------
// Enter an internal frame.
__ EnterInternalFrame();
// Preserve the function.
__ push(r1);
// Push the function on the stack as the argument to the runtime function.
__ push(r1);
__ CallRuntime(Runtime::kLazyCompile, 1);
// Calculate the entry point.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore saved function.
__ pop(r1);
// Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ Jump(r2);
return GetCodeWithFlags(flags, "LazyCompileStub");
}
void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) { void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) { if (kind_ == Code::KEYED_CALL_IC) {
__ cmp(r2, Operand(Handle<String>(name))); __ cmp(r2, Operand(Handle<String>(name)));

1
deps/v8/src/assembler.h

@ -235,6 +235,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(void set_call_object(Object* target)); INLINE(void set_call_object(Object* target));
INLINE(Object** call_object_address()); INLINE(Object** call_object_address());
template<typename StaticVisitor> inline void Visit();
inline void Visit(ObjectVisitor* v); inline void Visit(ObjectVisitor* v);
// Patch the code with some other code. // Patch the code with some other code.

18
deps/v8/src/bootstrapper.cc

@ -36,6 +36,7 @@
#include "global-handles.h" #include "global-handles.h"
#include "macro-assembler.h" #include "macro-assembler.h"
#include "natives.h" #include "natives.h"
#include "objects-visiting.h"
#include "snapshot.h" #include "snapshot.h"
#include "stub-cache.h" #include "stub-cache.h"
@ -56,7 +57,7 @@ class SourceCodeCache BASE_EMBEDDED {
} }
void Iterate(ObjectVisitor* v) { void Iterate(ObjectVisitor* v) {
v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_)); v->VisitPointer(BitCast<Object**>(&cache_));
} }
@ -470,6 +471,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction() {
Handle<Code> code = Handle<Code> code =
Handle<Code>(Builtins::builtin(Builtins::EmptyFunction)); Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
empty_function->set_code(*code); empty_function->set_code(*code);
empty_function->shared()->set_code(*code);
Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}")); Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
Handle<Script> script = Factory::NewScript(source); Handle<Script> script = Factory::NewScript(source);
script->set_type(Smi::FromInt(Script::TYPE_NATIVE)); script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
@ -812,9 +814,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
initial_map->set_instance_size( initial_map->set_instance_size(
initial_map->instance_size() + 5 * kPointerSize); initial_map->instance_size() + 5 * kPointerSize);
initial_map->set_instance_descriptors(*descriptors); initial_map->set_instance_descriptors(*descriptors);
initial_map->set_scavenger( initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
Heap::GetScavenger(initial_map->instance_type(),
initial_map->instance_size()));
} }
{ // -- J S O N { // -- J S O N
@ -1234,6 +1234,14 @@ bool Genesis::InstallNatives() {
InstallNativeFunctions(); InstallNativeFunctions();
// Store the map for the string prototype after the natives have been compiled
// and the String function has been set up.
Handle<JSFunction> string_function(global_context()->string_function());
ASSERT(JSObject::cast(
string_function->initial_map()->prototype())->HasFastProperties());
global_context()->set_string_function_prototype_map(
HeapObject::cast(string_function->initial_map()->prototype())->map());
InstallCustomCallGenerators(); InstallCustomCallGenerators();
// Install Function.prototype.call and apply. // Install Function.prototype.call and apply.
@ -1545,6 +1553,8 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
Handle<SharedFunctionInfo> shared Handle<SharedFunctionInfo> shared
= Handle<SharedFunctionInfo>(function->shared()); = Handle<SharedFunctionInfo>(function->shared());
if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false; if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
// Set the code object on the function object.
function->set_code(function->shared()->code());
builtins->set_javascript_builtin_code(id, shared->code()); builtins->set_javascript_builtin_code(id, shared->code());
} }
return true; return true;

2
deps/v8/src/builtins.h

@ -69,6 +69,7 @@ enum BuiltinExtraArguments {
V(JSConstructStubApi, BUILTIN, UNINITIALIZED) \ V(JSConstructStubApi, BUILTIN, UNINITIALIZED) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \ V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \ V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \
V(LazyCompile, BUILTIN, UNINITIALIZED) \
\ \
V(LoadIC_Miss, BUILTIN, UNINITIALIZED) \ V(LoadIC_Miss, BUILTIN, UNINITIALIZED) \
V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED) \ V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED) \
@ -249,6 +250,7 @@ class Builtins : public AllStatic {
static void Generate_JSConstructStubApi(MacroAssembler* masm); static void Generate_JSConstructStubApi(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm); static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm); static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
static void Generate_LazyCompile(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm); static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
static void Generate_FunctionCall(MacroAssembler* masm); static void Generate_FunctionCall(MacroAssembler* masm);

10
deps/v8/src/checks.h

@ -280,14 +280,13 @@ template <int> class StaticAssertionHelper { };
// The ASSERT macro is equivalent to CHECK except that it only // The ASSERT macro is equivalent to CHECK except that it only
// generates code in debug builds. Ditto STATIC_ASSERT. // generates code in debug builds.
#ifdef DEBUG #ifdef DEBUG
#define ASSERT_RESULT(expr) CHECK(expr) #define ASSERT_RESULT(expr) CHECK(expr)
#define ASSERT(condition) CHECK(condition) #define ASSERT(condition) CHECK(condition)
#define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2) #define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2)
#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2) #define ASSERT_NE(v1, v2) CHECK_NE(v1, v2)
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2) #define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define STATIC_ASSERT(test) STATIC_CHECK(test)
#define SLOW_ASSERT(condition) if (FLAG_enable_slow_asserts) CHECK(condition) #define SLOW_ASSERT(condition) if (FLAG_enable_slow_asserts) CHECK(condition)
#else #else
#define ASSERT_RESULT(expr) (expr) #define ASSERT_RESULT(expr) (expr)
@ -295,9 +294,14 @@ template <int> class StaticAssertionHelper { };
#define ASSERT_EQ(v1, v2) ((void) 0) #define ASSERT_EQ(v1, v2) ((void) 0)
#define ASSERT_NE(v1, v2) ((void) 0) #define ASSERT_NE(v1, v2) ((void) 0)
#define ASSERT_GE(v1, v2) ((void) 0) #define ASSERT_GE(v1, v2) ((void) 0)
#define STATIC_ASSERT(test) ((void) 0)
#define SLOW_ASSERT(condition) ((void) 0) #define SLOW_ASSERT(condition) ((void) 0)
#endif #endif
// Static asserts have no impact on runtime performance, so they can be
// safely enabled in release mode. Moreover, the ((void) 0) expression
// obeys different syntax rules than a typedef, e.g. it can't appear
// inside a class declaration, which leads to inconsistent behaviour
// between debug and release compilation modes.
#define STATIC_ASSERT(test) STATIC_CHECK(test)
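The new comment explains the move: the release-mode ((void) 0) expansion is an expression statement and cannot appear at class scope, whereas the typedef-based STATIC_CHECK can, so STATIC_ASSERT now expands the same way in both build modes. A self-contained sketch of the general idea, using the classic negative-array-size trick rather than V8's actual StaticAssertionHelper machinery:

// A toy compile-time check that is legal wherever a typedef is legal,
// including inside a class declaration; it fails to compile when the
// condition is false because the array size becomes -1.
#define TOY_STATIC_CHECK(condition) \
  typedef char ToyStaticAssertHelper[(condition) ? 1 : -1]

class Example {
  TOY_STATIC_CHECK(sizeof(int) >= 2);  // fine at class scope
  int field_;
};

TOY_STATIC_CHECK(sizeof(void*) >= sizeof(int));  // and at namespace scope

int main() { return 0; }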
#define ASSERT_TAG_ALIGNED(address) \ #define ASSERT_TAG_ALIGNED(address) \

15
deps/v8/src/codegen.cc

@ -77,14 +77,23 @@ void CodeGenerator::ProcessDeferred() {
// Generate the code. // Generate the code.
Comment cmnt(masm_, code->comment()); Comment cmnt(masm_, code->comment());
masm_->bind(code->entry_label()); masm_->bind(code->entry_label());
code->SaveRegisters(); if (code->AutoSaveAndRestore()) {
code->SaveRegisters();
}
code->Generate(); code->Generate();
code->RestoreRegisters(); if (code->AutoSaveAndRestore()) {
masm_->jmp(code->exit_label()); code->RestoreRegisters();
code->Exit();
}
} }
} }
void DeferredCode::Exit() {
masm_->jmp(exit_label());
}
void CodeGenerator::SetFrame(VirtualFrame* new_frame, void CodeGenerator::SetFrame(VirtualFrame* new_frame,
RegisterFile* non_frame_registers) { RegisterFile* non_frame_registers) {
RegisterFile saved_counts; RegisterFile saved_counts;

24
deps/v8/src/codegen.h

@ -101,7 +101,8 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
F(IsObject, 1, 1) \ F(IsObject, 1, 1) \
F(IsFunction, 1, 1) \ F(IsFunction, 1, 1) \
F(IsUndetectableObject, 1, 1) \ F(IsUndetectableObject, 1, 1) \
F(IsSpecObject, 1, 1) \ F(IsSpecObject, 1, 1) \
F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \
F(StringAdd, 2, 1) \ F(StringAdd, 2, 1) \
F(SubString, 3, 1) \ F(SubString, 3, 1) \
F(StringCompare, 2, 1) \ F(StringCompare, 2, 1) \
@ -319,6 +320,15 @@ class DeferredCode: public ZoneObject {
void SaveRegisters(); void SaveRegisters();
void RestoreRegisters(); void RestoreRegisters();
void Exit();
// If this returns true then all registers will be saved for the duration
// of the Generate() call. Otherwise the registers are not saved and the
// Generate() call must bracket any runtime calls with calls to
// SaveRegisters() and RestoreRegisters(). In this case the Generate
// method must also call Exit() in order to return to the non-deferred
// code.
virtual bool AutoSaveAndRestore() { return true; }
protected: protected:
MacroAssembler* masm_; MacroAssembler* masm_;
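The comment above defines the new contract: by default ProcessDeferred() saves and restores registers around Generate(), but a stub that returns false from AutoSaveAndRestore() must bracket its own runtime calls and finish with Exit(). A toy, self-contained analogue of that control flow, with plain C++ virtual dispatch standing in for the macro assembler; all names here are invented:

#include <cstdio>

class ToyDeferred {
 public:
  virtual ~ToyDeferred() {}
  virtual bool AutoSaveAndRestore() { return true; }
  virtual void Generate() = 0;
  void SaveRegisters()    { std::puts("save registers"); }
  void RestoreRegisters() { std::puts("restore registers"); }
  void Exit()             { std::puts("jump back to non-deferred code"); }
};

// Mirrors the ProcessDeferred() change: save/restore and Exit() happen
// automatically only when the stub asks for it.
void ProcessDeferred(ToyDeferred* code) {
  if (code->AutoSaveAndRestore()) code->SaveRegisters();
  code->Generate();
  if (code->AutoSaveAndRestore()) {
    code->RestoreRegisters();
    code->Exit();
  }
}

// A stub that opts out and manages registers around its own runtime call.
class ManualStub : public ToyDeferred {
 public:
  virtual bool AutoSaveAndRestore() { return false; }
  virtual void Generate() {
    SaveRegisters();
    std::puts("call runtime");
    RestoreRegisters();
    Exit();  // must return to the non-deferred code explicitly
  }
};

int main() {
  ManualStub stub;
  ProcessDeferred(&stub);
}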
@ -721,18 +731,6 @@ class CallFunctionStub: public CodeStub {
}; };
class ToBooleanStub: public CodeStub {
public:
ToBooleanStub() { }
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return ToBoolean; }
int MinorKey() { return 0; }
};
enum StringIndexFlags { enum StringIndexFlags {
// Accepts smis or heap numbers. // Accepts smis or heap numbers.
STRING_INDEX_IS_NUMBER, STRING_INDEX_IS_NUMBER,

28
deps/v8/src/compiler.cc

@ -33,7 +33,6 @@
#include "compiler.h" #include "compiler.h"
#include "data-flow.h" #include "data-flow.h"
#include "debug.h" #include "debug.h"
#include "fast-codegen.h"
#include "flow-graph.h" #include "flow-graph.h"
#include "full-codegen.h" #include "full-codegen.h"
#include "liveedit.h" #include "liveedit.h"
@ -120,14 +119,9 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
// //
// --full-compiler enables the dedicated backend for code we expect to be // --full-compiler enables the dedicated backend for code we expect to be
// run once // run once
// --fast-compiler enables a speculative optimizing backend (for
// non-run-once code)
// //
// The normal choice of backend can be overridden with the flags // The normal choice of backend can be overridden with the flags
// --always-full-compiler and --always-fast-compiler, which are mutually // --always-full-compiler.
// incompatible.
CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
Handle<SharedFunctionInfo> shared = info->shared_info(); Handle<SharedFunctionInfo> shared = info->shared_info();
bool is_run_once = (shared.is_null()) bool is_run_once = (shared.is_null())
? info->scope()->is_global_scope() ? info->scope()->is_global_scope()
@ -141,13 +135,6 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
if (checker.has_supported_syntax()) { if (checker.has_supported_syntax()) {
return FullCodeGenerator::MakeCode(info); return FullCodeGenerator::MakeCode(info);
} }
} else if (FLAG_always_fast_compiler ||
(FLAG_fast_compiler && !is_run_once)) {
FastCodeGenSyntaxChecker checker;
checker.Check(info);
if (checker.has_supported_syntax()) {
return FastCodeGenerator::MakeCode(info);
}
} }
return CodeGenerator::MakeCode(info); return CodeGenerator::MakeCode(info);
@ -494,7 +481,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// Generate code // Generate code
Handle<Code> code; Handle<Code> code;
if (FLAG_lazy && allow_lazy) { if (FLAG_lazy && allow_lazy) {
code = ComputeLazyCompile(literal->num_parameters()); code = Handle<Code>(Builtins::builtin(Builtins::LazyCompile));
} else { } else {
// The bodies of function literals have not yet been visited by // The bodies of function literals have not yet been visited by
// the AST optimizer/analyzer. // the AST optimizer/analyzer.
@ -528,7 +515,6 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// the static helper function MakeCode. // the static helper function MakeCode.
CompilationInfo info(literal, script, false); CompilationInfo info(literal, script, false);
CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
bool is_run_once = literal->try_full_codegen(); bool is_run_once = literal->try_full_codegen();
bool is_compiled = false; bool is_compiled = false;
@ -542,16 +528,6 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
code = FullCodeGenerator::MakeCode(&info); code = FullCodeGenerator::MakeCode(&info);
is_compiled = true; is_compiled = true;
} }
} else if (FLAG_always_fast_compiler ||
(FLAG_fast_compiler && !is_run_once)) {
// Since we are not lazily compiling we do not have a receiver to
// specialize for.
FastCodeGenSyntaxChecker checker;
checker.Check(&info);
if (checker.has_supported_syntax()) {
code = FastCodeGenerator::MakeCode(&info);
is_compiled = true;
}
} }
if (!is_compiled) { if (!is_compiled) {
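With the per-arity lazy-compile stubs removed here, a lazily compiled function's code pointer starts out as the single Builtins::LazyCompile builtin, which compiles the function on its first call and then re-enters the freshly installed code. A rough self-contained analogue of that trampoline pattern; ToyFunction and the other names are invented for the sketch, not V8's API:

#include <cstdio>
#include <string>

struct ToyFunction;
typedef int (*Code)(ToyFunction& fn, int arg);

struct ToyFunction {
  std::string source;  // what would be compiled lazily
  Code code;           // current entry point of the function
};

int CompiledBody(ToyFunction& fn, int arg) { return arg * 2; }

// Shared "LazyCompile" entry point: compile once, patch the function's
// code pointer, then re-enter the real code with the original argument.
int LazyCompile(ToyFunction& fn, int arg) {
  std::printf("compiling: %s\n", fn.source.c_str());
  fn.code = CompiledBody;
  return fn.code(fn, arg);
}

int main() {
  ToyFunction f = { "function f(x) { return x * 2; }", LazyCompile };
  std::printf("%d\n", f.code(f, 21));  // first call triggers compilation
  std::printf("%d\n", f.code(f, 21));  // subsequent calls run compiled code
}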

53
deps/v8/src/compiler.h

@ -41,37 +41,6 @@ namespace internal {
// is constructed based on the resources available at compile-time. // is constructed based on the resources available at compile-time.
class CompilationInfo BASE_EMBEDDED { class CompilationInfo BASE_EMBEDDED {
public: public:
// Compilation mode. Either the compiler is used as the primary
// compiler and needs to setup everything or the compiler is used as
// the secondary compiler for split compilation and has to handle
// bailouts.
enum Mode {
PRIMARY,
SECONDARY
};
// A description of the compilation state at a bailout to the secondary
// code generator.
//
// The state is currently simple: there are no parameters or local
// variables to worry about ('this' can be found in the stack frame).
// There are at most two live values.
//
// There is a label that should be bound to the beginning of the bailout
// stub code.
class Bailout : public ZoneObject {
public:
Bailout(Register left, Register right) : left_(left), right_(right) {}
Label* label() { return &label_; }
private:
Register left_;
Register right_;
Label label_;
};
// Lazy compilation of a JSFunction. // Lazy compilation of a JSFunction.
CompilationInfo(Handle<JSFunction> closure, CompilationInfo(Handle<JSFunction> closure,
int loop_nesting, int loop_nesting,
@ -145,12 +114,6 @@ class CompilationInfo BASE_EMBEDDED {
int loop_nesting() { return loop_nesting_; } int loop_nesting() { return loop_nesting_; }
bool has_receiver() { return !receiver_.is_null(); } bool has_receiver() { return !receiver_.is_null(); }
Handle<Object> receiver() { return receiver_; } Handle<Object> receiver() { return receiver_; }
List<Bailout*>* bailouts() { return &bailouts_; }
// Accessors for mutable fields (possibly set by analysis passes) with
// default values given by Initialize.
Mode mode() { return mode_; }
void set_mode(Mode mode) { mode_ = mode; }
bool has_this_properties() { return has_this_properties_; } bool has_this_properties() { return has_this_properties_; }
void set_has_this_properties(bool flag) { has_this_properties_ = flag; } void set_has_this_properties(bool flag) { has_this_properties_ = flag; }
@ -169,19 +132,8 @@ class CompilationInfo BASE_EMBEDDED {
// Derived accessors. // Derived accessors.
Scope* scope() { return function()->scope(); } Scope* scope() { return function()->scope(); }
// Add a bailout with two live values.
Label* AddBailout(Register left, Register right) {
Bailout* bailout = new Bailout(left, right);
bailouts_.Add(bailout);
return bailout->label();
}
// Add a bailout with no live values.
Label* AddBailout() { return AddBailout(no_reg, no_reg); }
private: private:
void Initialize() { void Initialize() {
mode_ = PRIMARY;
has_this_properties_ = false; has_this_properties_ = false;
has_globals_ = false; has_globals_ = false;
} }
@ -191,7 +143,6 @@ class CompilationInfo BASE_EMBEDDED {
Handle<Script> script_; Handle<Script> script_;
FunctionLiteral* function_; FunctionLiteral* function_;
Mode mode_;
bool is_eval_; bool is_eval_;
int loop_nesting_; int loop_nesting_;
@ -201,10 +152,6 @@ class CompilationInfo BASE_EMBEDDED {
bool has_this_properties_; bool has_this_properties_;
bool has_globals_; bool has_globals_;
// An ordered list of bailout points encountered during fast-path
// compilation.
List<Bailout*> bailouts_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo); DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
}; };

2
deps/v8/src/contexts.h

@ -56,6 +56,7 @@ enum ContextLookupFlags {
V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \ V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \ V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
V(STRING_FUNCTION_INDEX, JSFunction, string_function) \ V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \ V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \ V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \ V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
@ -186,6 +187,7 @@ class Context: public FixedArray {
BOOLEAN_FUNCTION_INDEX, BOOLEAN_FUNCTION_INDEX,
NUMBER_FUNCTION_INDEX, NUMBER_FUNCTION_INDEX,
STRING_FUNCTION_INDEX, STRING_FUNCTION_INDEX,
STRING_FUNCTION_PROTOTYPE_MAP_INDEX,
OBJECT_FUNCTION_INDEX, OBJECT_FUNCTION_INDEX,
ARRAY_FUNCTION_INDEX, ARRAY_FUNCTION_INDEX,
DATE_FUNCTION_INDEX, DATE_FUNCTION_INDEX,

11
deps/v8/src/cpu-profiler.cc

@ -476,7 +476,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) { CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
const double actual_sampling_rate = generator_->actual_sampling_rate(); const double actual_sampling_rate = generator_->actual_sampling_rate();
StopProcessorIfLastProfile(); StopProcessorIfLastProfile(title);
CpuProfile* result = CpuProfile* result =
profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken, profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
title, title,
@ -491,14 +491,15 @@ CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token, CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
String* title) { String* title) {
const double actual_sampling_rate = generator_->actual_sampling_rate(); const double actual_sampling_rate = generator_->actual_sampling_rate();
StopProcessorIfLastProfile(); const char* profile_title = profiles_->GetName(title);
StopProcessorIfLastProfile(profile_title);
int token = token_enumerator_->GetTokenId(security_token); int token = token_enumerator_->GetTokenId(security_token);
return profiles_->StopProfiling(token, title, actual_sampling_rate); return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
} }
void CpuProfiler::StopProcessorIfLastProfile() { void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
if (profiles_->is_last_profile()) { if (profiles_->IsLastProfile(title)) {
reinterpret_cast<Sampler*>(Logger::ticker_)->Stop(); reinterpret_cast<Sampler*>(Logger::ticker_)->Stop();
processor_->Stop(); processor_->Stop();
processor_->Join(); processor_->Join();

2
deps/v8/src/cpu-profiler.h

@ -260,7 +260,7 @@ class CpuProfiler {
void StartProcessorIfNotStarted(); void StartProcessorIfNotStarted();
CpuProfile* StopCollectingProfile(const char* title); CpuProfile* StopCollectingProfile(const char* title);
CpuProfile* StopCollectingProfile(Object* security_token, String* title); CpuProfile* StopCollectingProfile(Object* security_token, String* title);
void StopProcessorIfLastProfile(); void StopProcessorIfLastProfile(const char* title);
CpuProfilesCollection* profiles_; CpuProfilesCollection* profiles_;
unsigned next_profile_uid_; unsigned next_profile_uid_;

33
deps/v8/src/debug.cc

@ -582,6 +582,35 @@ int Debug::ArchiveSpacePerThread() {
} }
// Frame structure (conforms to the InternalFrame structure):
// -- code
// -- SMI marker
// -- function (slot is called "context")
// -- frame base
Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
Handle<Code> code) {
ASSERT(bottom_js_frame->is_java_script());
Address fp = bottom_js_frame->fp();
// Move function pointer into "context" slot.
Memory::Object_at(fp + StandardFrameConstants::kContextOffset) =
Memory::Object_at(fp + JavaScriptFrameConstants::kFunctionOffset);
Memory::Object_at(fp + InternalFrameConstants::kCodeOffset) = *code;
Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset) =
Smi::FromInt(StackFrame::INTERNAL);
return reinterpret_cast<Object**>(&Memory::Object_at(
fp + StandardFrameConstants::kContextOffset));
}
const int Debug::kFrameDropperFrameSize = 4;
// Default break enabled. // Default break enabled.
bool Debug::disable_break_ = false; bool Debug::disable_break_ = false;
@ -852,8 +881,8 @@ void Debug::PreemptionWhileInDebugger() {
void Debug::Iterate(ObjectVisitor* v) { void Debug::Iterate(ObjectVisitor* v) {
v->VisitPointer(BitCast<Object**, Code**>(&(debug_break_return_))); v->VisitPointer(BitCast<Object**>(&(debug_break_return_)));
v->VisitPointer(BitCast<Object**, Code**>(&(debug_break_slot_))); v->VisitPointer(BitCast<Object**>(&(debug_break_slot_)));
} }

10
deps/v8/src/debug.h

@ -400,6 +400,11 @@ class Debug {
static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm); static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm);
static void GenerateSlotDebugBreak(MacroAssembler* masm); static void GenerateSlotDebugBreak(MacroAssembler* masm);
static void GeneratePlainReturnLiveEdit(MacroAssembler* masm); static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
// FrameDropper is a code replacement for a JavaScript frame with possibly
// several frames above.
// There are no calling conventions here, because it never actually gets
// called; it only gets returned to.
static void GenerateFrameDropperLiveEdit(MacroAssembler* masm); static void GenerateFrameDropperLiveEdit(MacroAssembler* masm);
// Called from stub-cache.cc. // Called from stub-cache.cc.
@ -431,13 +436,14 @@ class Debug {
// the value that is called 'restarter_frame_function_pointer'. The value // the value that is called 'restarter_frame_function_pointer'. The value
// at this address (possibly updated by GC) may be used later when preparing // at this address (possibly updated by GC) may be used later when preparing
// 'step in' operation. // 'step in' operation.
// The implementation is architecture-specific.
// TODO(LiveEdit): consider reviewing it as architecture-independent.
static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame, static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
Handle<Code> code); Handle<Code> code);
static const int kFrameDropperFrameSize; static const int kFrameDropperFrameSize;
// Architecture-specific constant.
static const bool kFrameDropperSupported;
private: private:
static bool CompileDebuggerScript(int index); static bool CompileDebuggerScript(int index);
static void ClearOneShot(); static void ClearOneShot();

14
deps/v8/src/factory.cc

@ -32,6 +32,7 @@
#include "execution.h" #include "execution.h"
#include "factory.h" #include "factory.h"
#include "macro-assembler.h" #include "macro-assembler.h"
#include "objects-visiting.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -277,8 +278,7 @@ Handle<Map> Factory::CopyMap(Handle<Map> src,
copy->set_inobject_properties(inobject_properties); copy->set_inobject_properties(inobject_properties);
copy->set_unused_property_fields(inobject_properties); copy->set_unused_property_fields(inobject_properties);
copy->set_instance_size(copy->instance_size() + instance_size_delta); copy->set_instance_size(copy->instance_size() + instance_size_delta);
copy->set_scavenger(Heap::GetScavenger(copy->instance_type(), copy->set_visitor_id(StaticVisitorBase::GetVisitorId(*copy));
copy->instance_size()));
return copy; return copy;
} }
@ -486,6 +486,10 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
bool force_initial_map) { bool force_initial_map) {
// Allocate the function // Allocate the function
Handle<JSFunction> function = NewFunction(name, the_hole_value()); Handle<JSFunction> function = NewFunction(name, the_hole_value());
// Setup the code pointer in both the shared function info and in
// the function itself.
function->shared()->set_code(*code);
function->set_code(*code); function->set_code(*code);
if (force_initial_map || if (force_initial_map ||
@ -511,9 +515,12 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
Handle<JSObject> prototype, Handle<JSObject> prototype,
Handle<Code> code, Handle<Code> code,
bool force_initial_map) { bool force_initial_map) {
// Allocate the function // Allocate the function.
Handle<JSFunction> function = NewFunction(name, prototype); Handle<JSFunction> function = NewFunction(name, prototype);
// Setup the code pointer in both the shared function info and in
// the function itself.
function->shared()->set_code(*code);
function->set_code(*code); function->set_code(*code);
if (force_initial_map || if (force_initial_map ||
@ -535,6 +542,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name, Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
Handle<Code> code) { Handle<Code> code) {
Handle<JSFunction> function = NewFunctionWithoutPrototype(name); Handle<JSFunction> function = NewFunctionWithoutPrototype(name);
function->shared()->set_code(*code);
function->set_code(*code); function->set_code(*code);
ASSERT(!function->has_initial_map()); ASSERT(!function->has_initial_map());
ASSERT(!function->has_prototype()); ASSERT(!function->has_prototype());

4
deps/v8/src/factory.h

@ -329,7 +329,7 @@ class Factory : public AllStatic {
#define ROOT_ACCESSOR(type, name, camel_name) \ #define ROOT_ACCESSOR(type, name, camel_name) \
static inline Handle<type> name() { \ static inline Handle<type> name() { \
return Handle<type>(BitCast<type**, Object**>( \ return Handle<type>(BitCast<type**>( \
&Heap::roots_[Heap::k##camel_name##RootIndex])); \ &Heap::roots_[Heap::k##camel_name##RootIndex])); \
} }
ROOT_LIST(ROOT_ACCESSOR) ROOT_LIST(ROOT_ACCESSOR)
@ -337,7 +337,7 @@ class Factory : public AllStatic {
#define SYMBOL_ACCESSOR(name, str) \ #define SYMBOL_ACCESSOR(name, str) \
static inline Handle<String> name() { \ static inline Handle<String> name() { \
return Handle<String>(BitCast<String**, Object**>( \ return Handle<String>(BitCast<String**>( \
&Heap::roots_[Heap::k##name##RootIndex])); \ &Heap::roots_[Heap::k##name##RootIndex])); \
} }
SYMBOL_LIST(SYMBOL_ACCESSOR) SYMBOL_LIST(SYMBOL_ACCESSOR)

746
deps/v8/src/fast-codegen.cc

@ -1,746 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "data-flow.h"
#include "fast-codegen.h"
#include "scopes.h"
namespace v8 {
namespace internal {
#define BAILOUT(reason) \
do { \
if (FLAG_trace_bailout) { \
PrintF("%s\n", reason); \
} \
has_supported_syntax_ = false; \
return; \
} while (false)
#define CHECK_BAILOUT \
do { \
if (!has_supported_syntax_) return; \
} while (false)
void FastCodeGenSyntaxChecker::Check(CompilationInfo* info) {
info_ = info;
// We do not specialize if we do not have a receiver or if it is not a
// JS object with fast mode properties.
if (!info->has_receiver()) BAILOUT("No receiver");
if (!info->receiver()->IsJSObject()) BAILOUT("Receiver is not an object");
Handle<JSObject> object = Handle<JSObject>::cast(info->receiver());
if (!object->HasFastProperties()) BAILOUT("Receiver is in dictionary mode");
// We do not support stack or heap slots (both of which require
// allocation).
Scope* scope = info->scope();
if (scope->num_stack_slots() > 0) {
BAILOUT("Function has stack-allocated locals");
}
if (scope->num_heap_slots() > 0) {
BAILOUT("Function has context-allocated locals");
}
VisitDeclarations(scope->declarations());
CHECK_BAILOUT;
// We do not support empty function bodies.
if (info->function()->body()->is_empty()) {
BAILOUT("Function has an empty body");
}
VisitStatements(info->function()->body());
}
void FastCodeGenSyntaxChecker::VisitDeclarations(
ZoneList<Declaration*>* decls) {
if (!decls->is_empty()) BAILOUT("Function has declarations");
}
void FastCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
if (stmts->length() != 1) {
BAILOUT("Function body is not a singleton statement.");
}
Visit(stmts->at(0));
}
void FastCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
}
void FastCodeGenSyntaxChecker::VisitBlock(Block* stmt) {
VisitStatements(stmt->statements());
}
void FastCodeGenSyntaxChecker::VisitExpressionStatement(
ExpressionStatement* stmt) {
Visit(stmt->expression());
}
void FastCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) {
// Supported.
}
void FastCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) {
BAILOUT("IfStatement");
}
void FastCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) {
BAILOUT("Continuestatement");
}
void FastCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) {
BAILOUT("BreakStatement");
}
void FastCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) {
BAILOUT("ReturnStatement");
}
void FastCodeGenSyntaxChecker::VisitWithEnterStatement(
WithEnterStatement* stmt) {
BAILOUT("WithEnterStatement");
}
void FastCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) {
BAILOUT("WithExitStatement");
}
void FastCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) {
BAILOUT("SwitchStatement");
}
void FastCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
BAILOUT("DoWhileStatement");
}
void FastCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) {
BAILOUT("WhileStatement");
}
void FastCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) {
BAILOUT("ForStatement");
}
void FastCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) {
BAILOUT("ForInStatement");
}
void FastCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) {
BAILOUT("TryCatchStatement");
}
void FastCodeGenSyntaxChecker::VisitTryFinallyStatement(
TryFinallyStatement* stmt) {
BAILOUT("TryFinallyStatement");
}
void FastCodeGenSyntaxChecker::VisitDebuggerStatement(
DebuggerStatement* stmt) {
BAILOUT("DebuggerStatement");
}
void FastCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
BAILOUT("FunctionLiteral");
}
void FastCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
BAILOUT("SharedFunctionInfoLiteral");
}
void FastCodeGenSyntaxChecker::VisitConditional(Conditional* expr) {
BAILOUT("Conditional");
}
void FastCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
UNREACHABLE();
}
void FastCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
// Only global variable references are supported.
Variable* var = expr->var();
if (!var->is_global() || var->is_this()) BAILOUT("Non-global variable");
// Check if the global variable is existing and non-deletable.
if (info()->has_global_object()) {
LookupResult lookup;
info()->global_object()->Lookup(*expr->name(), &lookup);
if (!lookup.IsProperty()) {
BAILOUT("Non-existing global variable");
}
// We do not handle global variables with accessors or interceptors.
if (lookup.type() != NORMAL) {
BAILOUT("Global variable with accessors or interceptors.");
}
// We do not handle deletable global variables.
if (!lookup.IsDontDelete()) {
BAILOUT("Deletable global variable");
}
}
}
void FastCodeGenSyntaxChecker::VisitLiteral(Literal* expr) {
BAILOUT("Literal");
}
void FastCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
BAILOUT("RegExpLiteral");
}
void FastCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) {
BAILOUT("ObjectLiteral");
}
void FastCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) {
BAILOUT("ArrayLiteral");
}
void FastCodeGenSyntaxChecker::VisitCatchExtensionObject(
CatchExtensionObject* expr) {
BAILOUT("CatchExtensionObject");
}
void FastCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
// Simple assignments to (named) this properties are supported.
if (expr->op() != Token::ASSIGN) BAILOUT("Non-simple assignment");
Property* prop = expr->target()->AsProperty();
if (prop == NULL) BAILOUT("Non-property assignment");
VariableProxy* proxy = prop->obj()->AsVariableProxy();
if (proxy == NULL || !proxy->var()->is_this()) {
BAILOUT("Non-this-property assignment");
}
if (!prop->key()->IsPropertyName()) {
BAILOUT("Non-named-property assignment");
}
// We will only specialize for fields on the object itself.
// Expression::IsPropertyName implies that the name is a literal
// symbol but we do not assume that.
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsString()) {
Handle<Object> receiver = info()->receiver();
Handle<String> name = Handle<String>::cast(key->handle());
LookupResult lookup;
receiver->Lookup(*name, &lookup);
if (!lookup.IsProperty()) {
BAILOUT("Assigned property not found at compile time");
}
if (lookup.holder() != *receiver) BAILOUT("Non-own property assignment");
if (!lookup.type() == FIELD) BAILOUT("Non-field property assignment");
} else {
UNREACHABLE();
BAILOUT("Unexpected non-string-literal property key");
}
Visit(expr->value());
}
void FastCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
BAILOUT("Throw");
}
void FastCodeGenSyntaxChecker::VisitProperty(Property* expr) {
// We support named this property references.
VariableProxy* proxy = expr->obj()->AsVariableProxy();
if (proxy == NULL || !proxy->var()->is_this()) {
BAILOUT("Non-this-property reference");
}
if (!expr->key()->IsPropertyName()) {
BAILOUT("Non-named-property reference");
}
// We will only specialize for fields on the object itself.
// Expression::IsPropertyName implies that the name is a literal
// symbol but we do not assume that.
Literal* key = expr->key()->AsLiteral();
if (key != NULL && key->handle()->IsString()) {
Handle<Object> receiver = info()->receiver();
Handle<String> name = Handle<String>::cast(key->handle());
LookupResult lookup;
receiver->Lookup(*name, &lookup);
if (!lookup.IsProperty()) {
BAILOUT("Referenced property not found at compile time");
}
if (lookup.holder() != *receiver) BAILOUT("Non-own property reference");
if (!lookup.type() == FIELD) BAILOUT("Non-field property reference");
} else {
UNREACHABLE();
BAILOUT("Unexpected non-string-literal property key");
}
}
void FastCodeGenSyntaxChecker::VisitCall(Call* expr) {
BAILOUT("Call");
}
void FastCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) {
BAILOUT("CallNew");
}
void FastCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) {
BAILOUT("CallRuntime");
}
void FastCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) {
BAILOUT("UnaryOperation");
}
void FastCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
BAILOUT("CountOperation");
}
void FastCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
// We support bitwise OR.
switch (expr->op()) {
case Token::COMMA:
BAILOUT("BinaryOperation COMMA");
case Token::OR:
BAILOUT("BinaryOperation OR");
case Token::AND:
BAILOUT("BinaryOperation AND");
case Token::BIT_OR:
// We support expressions nested on the left because they only require
// a pair of registers to keep all intermediate values in registers
// (i.e., the expression stack has height no more than two).
if (!expr->right()->IsLeaf()) BAILOUT("expression nested on right");
// We do not allow subexpressions with side effects because we
// (currently) bail out to the beginning of the full function. The
// only expressions with side effects that we would otherwise handle
// are assignments.
if (expr->left()->AsAssignment() != NULL ||
expr->right()->AsAssignment() != NULL) {
BAILOUT("subexpression of binary operation has side effects");
}
Visit(expr->left());
CHECK_BAILOUT;
Visit(expr->right());
break;
case Token::BIT_XOR:
BAILOUT("BinaryOperation BIT_XOR");
case Token::BIT_AND:
BAILOUT("BinaryOperation BIT_AND");
case Token::SHL:
BAILOUT("BinaryOperation SHL");
case Token::SAR:
BAILOUT("BinaryOperation SAR");
case Token::SHR:
BAILOUT("BinaryOperation SHR");
case Token::ADD:
BAILOUT("BinaryOperation ADD");
case Token::SUB:
BAILOUT("BinaryOperation SUB");
case Token::MUL:
BAILOUT("BinaryOperation MUL");
case Token::DIV:
BAILOUT("BinaryOperation DIV");
case Token::MOD:
BAILOUT("BinaryOperation MOD");
default:
UNREACHABLE();
}
}
void FastCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) {
BAILOUT("CompareOperation");
}
void FastCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
BAILOUT("ThisFunction");
}
#undef BAILOUT
#undef CHECK_BAILOUT
#define __ ACCESS_MASM(masm())
Handle<Code> FastCodeGenerator::MakeCode(CompilationInfo* info) {
// Label the AST before calling MakeCodePrologue, so AST node numbers are
// printed with the AST.
AstLabeler labeler;
labeler.Label(info);
CodeGenerator::MakeCodePrologue(info);
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize);
// Generate the fast-path code.
FastCodeGenerator fast_cgen(&masm);
fast_cgen.Generate(info);
if (fast_cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
return Handle<Code>::null();
}
// Generate the full code for the function in bailout mode, using the same
// macro assembler.
CodeGenerator cgen(&masm);
CodeGeneratorScope scope(&cgen);
info->set_mode(CompilationInfo::SECONDARY);
cgen.Generate(info);
if (cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
return Handle<Code>::null();
}
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
return CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
}
void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
}
void FastCodeGenerator::VisitBlock(Block* stmt) {
VisitStatements(stmt->statements());
}
void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
Visit(stmt->expression());
}
void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
// Nothing to do.
}
void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitConditional(Conditional* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitSlot(Slot* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
ASSERT(expr->var()->is_global() && !expr->var()->is_this());
// Check if we can compile a global variable load directly from the cell.
ASSERT(info()->has_global_object());
LookupResult lookup;
info()->global_object()->Lookup(*expr->name(), &lookup);
// We only support normal (non-accessor/interceptor) DontDelete properties
// for now.
ASSERT(lookup.IsProperty());
ASSERT_EQ(NORMAL, lookup.type());
ASSERT(lookup.IsDontDelete());
Handle<Object> cell(info()->global_object()->GetPropertyCell(&lookup));
// Global variable lookups do not have side effects, so we do not need to
// emit code if we are in an effect context.
if (!destination().is(no_reg)) {
Comment cmnt(masm(), ";; Global");
if (FLAG_print_ir) {
SmartPointer<char> name = expr->name()->ToCString();
PrintF("%d: t%d = Global(%s)\n", expr->num(),
expr->num(), *name);
}
EmitGlobalVariableLoad(cell);
}
}
void FastCodeGenerator::VisitLiteral(Literal* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitAssignment(Assignment* expr) {
// Known to be a simple this property assignment. Effectively a unary
// operation.
{ Register my_destination = destination();
set_destination(accumulator0());
Visit(expr->value());
set_destination(my_destination);
}
Property* prop = expr->target()->AsProperty();
ASSERT_NOT_NULL(prop);
ASSERT_NOT_NULL(prop->obj()->AsVariableProxy());
ASSERT(prop->obj()->AsVariableProxy()->var()->is_this());
ASSERT(prop->key()->IsPropertyName());
Handle<String> name =
Handle<String>::cast(prop->key()->AsLiteral()->handle());
Comment cmnt(masm(), ";; Store to this");
if (FLAG_print_ir) {
SmartPointer<char> name_string = name->ToCString();
PrintF("%d: ", expr->num());
if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
PrintF("Store(this, \"%s\", t%d)\n", *name_string,
expr->value()->num());
}
EmitThisPropertyStore(name);
}
void FastCodeGenerator::VisitThrow(Throw* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitProperty(Property* expr) {
ASSERT_NOT_NULL(expr->obj()->AsVariableProxy());
ASSERT(expr->obj()->AsVariableProxy()->var()->is_this());
ASSERT(expr->key()->IsPropertyName());
if (!destination().is(no_reg)) {
Handle<String> name =
Handle<String>::cast(expr->key()->AsLiteral()->handle());
Comment cmnt(masm(), ";; Load from this");
if (FLAG_print_ir) {
SmartPointer<char> name_string = name->ToCString();
PrintF("%d: t%d = Load(this, \"%s\")\n",
expr->num(), expr->num(), *name_string);
}
EmitThisPropertyLoad(name);
}
}
void FastCodeGenerator::VisitCall(Call* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitCallNew(CallNew* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
// We support limited binary operations: bitwise OR only allowed to be
// nested on the left.
ASSERT(expr->op() == Token::BIT_OR);
ASSERT(expr->right()->IsLeaf());
{ Register my_destination = destination();
set_destination(accumulator1());
Visit(expr->left());
set_destination(accumulator0());
Visit(expr->right());
set_destination(my_destination);
}
Comment cmnt(masm(), ";; BIT_OR");
if (FLAG_print_ir) {
PrintF("%d: ", expr->num());
if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
PrintF("BIT_OR(t%d, t%d)\n", expr->left()->num(), expr->right()->num());
}
EmitBitOr();
}
void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
UNREACHABLE();
}
#undef __
} } // namespace v8::internal

161
deps/v8/src/fast-codegen.h

@ -1,161 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_FAST_CODEGEN_H_
#define V8_FAST_CODEGEN_H_
#if V8_TARGET_ARCH_IA32
#include "ia32/fast-codegen-ia32.h"
#else
#include "v8.h"
#include "ast.h"
#include "compiler.h"
#include "list.h"
namespace v8 {
namespace internal {
class FastCodeGenSyntaxChecker: public AstVisitor {
public:
explicit FastCodeGenSyntaxChecker()
: info_(NULL), has_supported_syntax_(true) {
}
void Check(CompilationInfo* info);
CompilationInfo* info() { return info_; }
bool has_supported_syntax() { return has_supported_syntax_; }
private:
void VisitDeclarations(ZoneList<Declaration*>* decls);
void VisitStatements(ZoneList<Statement*>* stmts);
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
CompilationInfo* info_;
bool has_supported_syntax_;
DISALLOW_COPY_AND_ASSIGN(FastCodeGenSyntaxChecker);
};
class FastCodeGenerator: public AstVisitor {
public:
explicit FastCodeGenerator(MacroAssembler* masm)
: masm_(masm), info_(NULL), destination_(no_reg), smi_bits_(0) {
}
static Handle<Code> MakeCode(CompilationInfo* info);
void Generate(CompilationInfo* compilation_info);
private:
MacroAssembler* masm() { return masm_; }
CompilationInfo* info() { return info_; }
Register destination() { return destination_; }
void set_destination(Register reg) { destination_ = reg; }
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return info_->scope(); }
// Platform-specific fixed registers, all guaranteed distinct.
Register accumulator0();
Register accumulator1();
Register scratch0();
Register scratch1();
Register scratch2();
Register receiver_reg();
Register context_reg();
Register other_accumulator(Register reg) {
ASSERT(reg.is(accumulator0()) || reg.is(accumulator1()));
return (reg.is(accumulator0())) ? accumulator1() : accumulator0();
}
// Flags are true if the respective register is statically known to hold a
// smi. We do not track every register, only the accumulator registers.
bool is_smi(Register reg) {
ASSERT(!reg.is(no_reg));
return (smi_bits_ & reg.bit()) != 0;
}
void set_as_smi(Register reg) {
ASSERT(!reg.is(no_reg));
smi_bits_ = smi_bits_ | reg.bit();
}
void clear_as_smi(Register reg) {
ASSERT(!reg.is(no_reg));
smi_bits_ = smi_bits_ & ~reg.bit();
}
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
// Emit code to load the receiver from the stack into receiver_reg.
void EmitLoadReceiver();
// Emit code to load a global variable directly from a global property
// cell into the destination register.
void EmitGlobalVariableLoad(Handle<Object> cell);
// Emit a store to an own property of this. The stored value is expected
// in accumulator0 and the receiver in receiver_reg. The receiver
// register is preserved and the result (the stored value) is left in the
// destination register.
void EmitThisPropertyStore(Handle<String> name);
// Emit a load from an own property of this. The receiver is expected in
// receiver_reg. The receiver register is preserved and the result is
// left in the destination register.
void EmitThisPropertyLoad(Handle<String> name);
// Emit a bitwise or operation. The left operand is in accumulator1 and
// the right is in accumulator0. The result should be left in the
// destination register.
void EmitBitOr();
MacroAssembler* masm_;
CompilationInfo* info_;
Register destination_;
uint32_t smi_bits_;
DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
};
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
#endif // V8_FAST_CODEGEN_H_

3
deps/v8/src/flag-definitions.h

@@ -148,11 +148,8 @@ DEFINE_bool(strict, false, "strict error checking")
 DEFINE_int(min_preparse_length, 1024,
            "minimum length for automatic enable preparsing")
 DEFINE_bool(full_compiler, true, "enable dedicated backend for run-once code")
-DEFINE_bool(fast_compiler, false, "enable speculative optimizing backend")
 DEFINE_bool(always_full_compiler, false,
             "try to use the dedicated run-once backend for all code")
-DEFINE_bool(always_fast_compiler, false,
-            "try to use the speculative optimizing backend for all code")
 DEFINE_bool(trace_bailout, false,
             "print reasons for falling back to using the classic V8 backend")
 DEFINE_bool(safe_int32_compiler, true,

5
deps/v8/src/full-codegen.cc

@@ -677,7 +677,7 @@ Handle<Code> FullCodeGenerator::MakeCode(CompilationInfo* info) {
   MacroAssembler masm(NULL, kInitialBufferSize);
 
   FullCodeGenerator cgen(&masm);
-  cgen.Generate(info, PRIMARY);
+  cgen.Generate(info);
   if (cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());
     return Handle<Code>::null();
@@ -919,6 +919,9 @@ void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
     EmitGetFromCache(expr->arguments());
   } else if (strcmp("_IsRegExpEquivalent", *name->ToCString()) == 0) {
     EmitIsRegExpEquivalent(expr->arguments());
+  } else if (strcmp("_IsStringWrapperSafeForDefaultValueOf",
+                    *name->ToCString()) == 0) {
+    EmitIsStringWrapperSafeForDefaultValueOf(expr->arguments());
   } else {
     UNREACHABLE();
   }

9
deps/v8/src/full-codegen.h

@@ -89,11 +89,6 @@ class BreakableStatementChecker: public AstVisitor {
 class FullCodeGenerator: public AstVisitor {
  public:
-  enum Mode {
-    PRIMARY,
-    SECONDARY
-  };
-
   explicit FullCodeGenerator(MacroAssembler* masm)
       : masm_(masm),
         info_(NULL),
@@ -106,7 +101,7 @@ class FullCodeGenerator: public AstVisitor {
   static Handle<Code> MakeCode(CompilationInfo* info);
 
-  void Generate(CompilationInfo* info, Mode mode);
+  void Generate(CompilationInfo* info);
 
  private:
   class Breakable;
@@ -408,6 +403,8 @@ class FullCodeGenerator: public AstVisitor {
   void EmitIsArray(ZoneList<Expression*>* arguments);
   void EmitIsRegExp(ZoneList<Expression*>* arguments);
   void EmitIsConstructCall(ZoneList<Expression*>* arguments);
+  void EmitIsStringWrapperSafeForDefaultValueOf(
+      ZoneList<Expression*>* arguments);
   void EmitObjectEquals(ZoneList<Expression*>* arguments);
   void EmitArguments(ZoneList<Expression*>* arguments);
   void EmitArgumentsLength(ZoneList<Expression*>* arguments);

25
deps/v8/src/globals.h

@@ -507,6 +507,31 @@ union DoubleRepresentation {
 };
 
+// Union used for customized checking of the IEEE double types
+// inlined within v8 runtime, rather than going to the underlying
+// platform headers and libraries
+union IeeeDoubleLittleEndianArchType {
+  double d;
+  struct {
+    unsigned int man_low  :32;
+    unsigned int man_high :20;
+    unsigned int exp      :11;
+    unsigned int sign     :1;
+  } bits;
+};
+
+union IeeeDoubleBigEndianArchType {
+  double d;
+  struct {
+    unsigned int sign     :1;
+    unsigned int exp      :11;
+    unsigned int man_high :20;
+    unsigned int man_low  :32;
+  } bits;
+};
+
 // AccessorCallback
 struct AccessorDescriptor {
   Object* (*getter)(Object* object, void* data);
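
The two unions added to globals.h pick apart an IEEE-754 double without going through the platform headers. A small self-contained illustration of reading the little-endian variant, assuming the same implementation-defined bit-field layout the header relies on (the main() and sample value below are ours, not part of the commit):

// Illustration only: inspect a double's sign, exponent and mantissa fields
// through the little-endian union defined above.
#include <cstdio>

union IeeeDoubleLittleEndianArchType {
  double d;
  struct {
    unsigned int man_low  :32;
    unsigned int man_high :20;
    unsigned int exp      :11;
    unsigned int sign     :1;
  } bits;
};

int main() {
  IeeeDoubleLittleEndianArchType u;
  u.d = -1.5;
  // For -1.5: sign = 1, biased exponent = 1023, top mantissa bit set.
  std::printf("sign=%u exp=%u man_high=%05x man_low=%08x\n",
              (unsigned)u.bits.sign, (unsigned)u.bits.exp,
              (unsigned)u.bits.man_high, (unsigned)u.bits.man_low);
}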

2
deps/v8/src/handles-inl.h

@@ -47,7 +47,7 @@ template <class T>
 inline T* Handle<T>::operator*() const {
   ASSERT(location_ != NULL);
   ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
-  return *location_;
+  return *BitCast<T**>(location_);
 }

35
deps/v8/src/handles.cc

@@ -637,8 +637,8 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
     // Check access rights if required.
     if (current->IsAccessCheckNeeded() &&
-        !Top::MayNamedAccess(*current, Heap::undefined_value(),
-                             v8::ACCESS_KEYS)) {
+      !Top::MayNamedAccess(*current, Heap::undefined_value(),
+                           v8::ACCESS_KEYS)) {
       Top::ReportFailedAccessCheck(*current, v8::ACCESS_KEYS);
       break;
     }
@@ -771,20 +771,30 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
 bool CompileLazy(Handle<JSFunction> function,
                  Handle<Object> receiver,
                  ClearExceptionFlag flag) {
-  CompilationInfo info(function, 0, receiver);
-  bool result = CompileLazyHelper(&info, flag);
-  PROFILE(FunctionCreateEvent(*function));
-  return result;
+  if (function->shared()->is_compiled()) {
+    function->set_code(function->shared()->code());
+    return true;
+  } else {
+    CompilationInfo info(function, 0, receiver);
+    bool result = CompileLazyHelper(&info, flag);
+    PROFILE(FunctionCreateEvent(*function));
+    return result;
+  }
 }
 
 bool CompileLazyInLoop(Handle<JSFunction> function,
                        Handle<Object> receiver,
                        ClearExceptionFlag flag) {
-  CompilationInfo info(function, 1, receiver);
-  bool result = CompileLazyHelper(&info, flag);
-  PROFILE(FunctionCreateEvent(*function));
-  return result;
+  if (function->shared()->is_compiled()) {
+    function->set_code(function->shared()->code());
+    return true;
+  } else {
+    CompilationInfo info(function, 1, receiver);
+    bool result = CompileLazyHelper(&info, flag);
+    PROFILE(FunctionCreateEvent(*function));
+    return result;
+  }
 }
@@ -809,11 +819,6 @@ OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
 }
 
-Handle<Code> ComputeLazyCompile(int argc) {
-  CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code);
-}
-
 OptimizedObjectForAddingMultipleProperties::
 ~OptimizedObjectForAddingMultipleProperties() {
   // Reoptimize the object to allow fast property access.
3
deps/v8/src/handles.h

@@ -353,9 +353,6 @@ bool CompileLazyInLoop(Handle<JSFunction> function,
                        Handle<Object> receiver,
                        ClearExceptionFlag flag);
 
-// Returns the lazy compilation stub for argc arguments.
-Handle<Code> ComputeLazyCompile(int argc);
-
 class NoHandleAllocation BASE_EMBEDDED {
  public:
 #ifndef DEBUG

6
deps/v8/src/heap-profiler.cc

@@ -111,10 +111,10 @@ int Clusterizer::CalculateNetworkSize(JSObject* obj) {
   int size = obj->Size();
   // If 'properties' and 'elements' are non-empty (thus, non-shared),
   // take their size into account.
-  if (FixedArray::cast(obj->properties())->length() != 0) {
+  if (obj->properties() != Heap::empty_fixed_array()) {
     size += obj->properties()->Size();
   }
-  if (FixedArray::cast(obj->elements())->length() != 0) {
+  if (obj->elements() != Heap::empty_fixed_array()) {
     size += obj->elements()->Size();
   }
   // For functions, also account non-empty context and literals sizes.
@@ -360,7 +360,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(String* name) {
 HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) {
-  Heap::CollectAllGarbage(false);
+  Heap::CollectAllGarbage(true);
   HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
   HeapSnapshotGenerator generator(result);
   generator.GenerateSnapshot();

522
deps/v8/src/heap.cc

@@ -37,6 +37,7 @@
 #include "global-handles.h"
 #include "mark-compact.h"
 #include "natives.h"
+#include "objects-visiting.h"
 #include "scanner.h"
 #include "scopeinfo.h"
 #include "snapshot.h"
@@ -1032,6 +1033,17 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
 }
 
+class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
+ public:
+  static inline void VisitPointer(Object** p) {
+    Object* object = *p;
+    if (!Heap::InNewSpace(object)) return;
+    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
+                         reinterpret_cast<HeapObject*>(object));
+  }
+};
+
 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                          Address new_space_front) {
   do {
@@ -1042,10 +1054,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
     // queue is empty.
     while (new_space_front < new_space_.top()) {
       HeapObject* object = HeapObject::FromAddress(new_space_front);
-      Map* map = object->map();
-      int size = object->SizeFromMap(map);
-      object->IterateBody(map->instance_type(), size, scavenge_visitor);
-      new_space_front += size;
+      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
     }
 
     // Promote and process all the to-be-promoted objects.
@ -1072,315 +1081,231 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
} }
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) class ScavengingVisitor : public StaticVisitorBase {
static void RecordCopiedObject(HeapObject* obj) { public:
bool should_record = false; static void Initialize() {
#ifdef DEBUG table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
should_record = FLAG_heap_stats; table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
#endif table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
#ifdef ENABLE_LOGGING_AND_PROFILING table_.Register(kVisitByteArray, &EvacuateByteArray);
should_record = should_record || FLAG_log_gc; table_.Register(kVisitFixedArray, &EvacuateFixedArray);
#endif
if (should_record) {
if (Heap::new_space()->Contains(obj)) {
Heap::new_space()->RecordAllocation(obj);
} else {
Heap::new_space()->RecordPromotion(obj);
}
}
}
#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;
// Helper function used by CopyObject to copy a source object to an table_.Register(kVisitConsString,
// allocated target object and update the forwarding pointer in the source &ObjectEvacuationStrategy<POINTER_OBJECT>::
// object. Returns the target object. VisitSpecialized<ConsString::kSize>);
inline static HeapObject* MigrateObject(HeapObject* source,
HeapObject* target,
int size) {
// Copy the content of source to target.
Heap::CopyBlock(target->address(), source->address(), size);
// Set the forwarding address. table_.Register(kVisitSharedFunctionInfo,
source->set_map_word(MapWord::FromForwardingAddress(target)); &ObjectEvacuationStrategy<POINTER_OBJECT>::
VisitSpecialized<SharedFunctionInfo::kSize>);
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
// Update NewSpace stats if necessary. kVisitDataObject,
RecordCopiedObject(target); kVisitDataObjectGeneric>();
#endif
HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
return target; table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
} kVisitJSObject,
kVisitJSObjectGeneric>();
table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
kVisitStruct,
kVisitStructGeneric>();
}
enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
enum SizeRestriction { SMALL, UNKNOWN_SIZE };
static inline void Scavenge(Map* map, HeapObject** slot, HeapObject* obj) {
table_.GetVisitor(map)(map, slot, obj);
}
template<ObjectContents object_contents, SizeRestriction size_restriction>
static inline void EvacuateObject(Map* map,
HeapObject** slot,
HeapObject* object,
int object_size) {
ASSERT((size_restriction != SMALL) ||
(object_size <= Page::kMaxHeapObjectSize));
ASSERT(object->Size() == object_size);
if (Heap::ShouldBePromoted(object->address(), object_size)) { private:
Object* result; enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
enum SizeRestriction { SMALL, UNKNOWN_SIZE };
if ((size_restriction != SMALL) && #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
(object_size > Page::kMaxHeapObjectSize)) { static void RecordCopiedObject(HeapObject* obj) {
result = Heap::lo_space()->AllocateRawFixedArray(object_size); bool should_record = false;
} else { #ifdef DEBUG
if (object_contents == DATA_OBJECT) { should_record = FLAG_heap_stats;
result = Heap::old_data_space()->AllocateRaw(object_size); #endif
#ifdef ENABLE_LOGGING_AND_PROFILING
should_record = should_record || FLAG_log_gc;
#endif
if (should_record) {
if (Heap::new_space()->Contains(obj)) {
Heap::new_space()->RecordAllocation(obj);
} else { } else {
result = Heap::old_pointer_space()->AllocateRaw(object_size); Heap::new_space()->RecordPromotion(obj);
} }
} }
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
*slot = MigrateObject(object, target, object_size);
if (object_contents == POINTER_OBJECT) {
promotion_queue.insert(target, object_size);
}
Heap::tracer()->increment_promoted_objects_size(object_size);
return;
}
} }
Object* result = Heap::new_space()->AllocateRaw(object_size); #endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
ASSERT(!result->IsFailure());
*slot = MigrateObject(object, HeapObject::cast(result), object_size);
return;
}
// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
// object. Returns the target object.
INLINE(static HeapObject* MigrateObject(HeapObject* source,
HeapObject* target,
int size)) {
// Copy the content of source to target.
Heap::CopyBlock(target->address(), source->address(), size);
template<int object_size_in_words, ObjectContents object_contents> // Set the forwarding address.
static inline void EvacuateObjectOfFixedSize(Map* map, source->set_map_word(MapWord::FromForwardingAddress(target));
HeapObject** slot,
HeapObject* object) {
const int object_size = object_size_in_words << kPointerSizeLog2;
EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
}
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Update NewSpace stats if necessary.
RecordCopiedObject(target);
#endif
HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
template<ObjectContents object_contents> return target;
static inline void EvacuateObjectOfFixedSize(Map* map, }
HeapObject** slot,
HeapObject* object) {
int object_size = map->instance_size();
EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
}
static inline void EvacuateFixedArray(Map* map, template<ObjectContents object_contents, SizeRestriction size_restriction>
HeapObject** slot, static inline void EvacuateObject(Map* map,
HeapObject* object) { HeapObject** slot,
int object_size = FixedArray::cast(object)->FixedArraySize(); HeapObject* object,
EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size); int object_size) {
} ASSERT((size_restriction != SMALL) ||
(object_size <= Page::kMaxHeapObjectSize));
ASSERT(object->Size() == object_size);
if (Heap::ShouldBePromoted(object->address(), object_size)) {
Object* result;
static inline void EvacuateByteArray(Map* map, if ((size_restriction != SMALL) &&
HeapObject** slot, (object_size > Page::kMaxHeapObjectSize)) {
HeapObject* object) { result = Heap::lo_space()->AllocateRawFixedArray(object_size);
int object_size = ByteArray::cast(object)->ByteArraySize(); } else {
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size); if (object_contents == DATA_OBJECT) {
} result = Heap::old_data_space()->AllocateRaw(object_size);
} else {
result = Heap::old_pointer_space()->AllocateRaw(object_size);
}
}
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
*slot = MigrateObject(object, target, object_size);
static Scavenger GetScavengerForSize(int object_size, if (object_contents == POINTER_OBJECT) {
ObjectContents object_contents) { promotion_queue.insert(target, object_size);
ASSERT(IsAligned(object_size, kPointerSize)); }
ASSERT(object_size < Page::kMaxHeapObjectSize);
switch (object_size >> kPointerSizeLog2) { Heap::tracer()->increment_promoted_objects_size(object_size);
#define CASE(n) \ return;
case n: \
if (object_contents == DATA_OBJECT) { \
return static_cast<Scavenger>( \
&EvacuateObjectOfFixedSize<n, DATA_OBJECT>); \
} else { \
return static_cast<Scavenger>( \
&EvacuateObjectOfFixedSize<n, POINTER_OBJECT>); \
} }
}
Object* result = Heap::new_space()->AllocateRaw(object_size);
ASSERT(!result->IsFailure());
*slot = MigrateObject(object, HeapObject::cast(result), object_size);
return;
}
CASE(1);
CASE(2);
CASE(3);
CASE(4);
CASE(5);
CASE(6);
CASE(7);
CASE(8);
CASE(9);
CASE(10);
CASE(11);
CASE(12);
CASE(13);
CASE(14);
CASE(15);
CASE(16);
default:
if (object_contents == DATA_OBJECT) {
return static_cast<Scavenger>(&EvacuateObjectOfFixedSize<DATA_OBJECT>);
} else {
return static_cast<Scavenger>(
&EvacuateObjectOfFixedSize<POINTER_OBJECT>);
}
#undef CASE static inline void EvacuateFixedArray(Map* map,
HeapObject** slot,
HeapObject* object) {
int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
slot,
object,
object_size);
} }
}
static inline void EvacuateSeqAsciiString(Map* map, static inline void EvacuateByteArray(Map* map,
HeapObject** slot, HeapObject** slot,
HeapObject* object) { HeapObject* object) {
int object_size = SeqAsciiString::cast(object)-> int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
SeqAsciiStringSize(map->instance_type()); EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size); }
}
static inline void EvacuateSeqTwoByteString(Map* map, static inline void EvacuateSeqAsciiString(Map* map,
HeapObject** slot, HeapObject** slot,
HeapObject* object) { HeapObject* object) {
int object_size = SeqTwoByteString::cast(object)-> int object_size = SeqAsciiString::cast(object)->
SeqTwoByteStringSize(map->instance_type()); SeqAsciiStringSize(map->instance_type());
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size); EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
} }
static inline bool IsShortcutCandidate(int type) { static inline void EvacuateSeqTwoByteString(Map* map,
return ((type & kShortcutTypeMask) == kShortcutTypeTag); HeapObject** slot,
} HeapObject* object) {
int object_size = SeqTwoByteString::cast(object)->
SeqTwoByteStringSize(map->instance_type());
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
}
static inline void EvacuateShortcutCandidate(Map* map, static inline bool IsShortcutCandidate(int type) {
HeapObject** slot, return ((type & kShortcutTypeMask) == kShortcutTypeTag);
HeapObject* object) { }
ASSERT(IsShortcutCandidate(map->instance_type()));
if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) { static inline void EvacuateShortcutCandidate(Map* map,
HeapObject* first = HeapObject** slot,
HeapObject::cast(ConsString::cast(object)->unchecked_first()); HeapObject* object) {
ASSERT(IsShortcutCandidate(map->instance_type()));
*slot = first; if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
HeapObject* first =
HeapObject::cast(ConsString::cast(object)->unchecked_first());
if (!Heap::InNewSpace(first)) { *slot = first;
object->set_map_word(MapWord::FromForwardingAddress(first));
return; if (!Heap::InNewSpace(first)) {
} object->set_map_word(MapWord::FromForwardingAddress(first));
return;
}
MapWord first_word = first->map_word(); MapWord first_word = first->map_word();
if (first_word.IsForwardingAddress()) { if (first_word.IsForwardingAddress()) {
HeapObject* target = first_word.ToForwardingAddress(); HeapObject* target = first_word.ToForwardingAddress();
*slot = target;
object->set_map_word(MapWord::FromForwardingAddress(target));
return;
}
*slot = target; Scavenge(first->map(), slot, first);
object->set_map_word(MapWord::FromForwardingAddress(target)); object->set_map_word(MapWord::FromForwardingAddress(*slot));
return; return;
} }
first->map()->Scavenge(slot, first); int object_size = ConsString::kSize;
object->set_map_word(MapWord::FromForwardingAddress(*slot)); EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
return;
} }
int object_size = ConsString::kSize; template<ObjectContents object_contents>
EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size); class ObjectEvacuationStrategy {
} public:
template<int object_size>
static inline void VisitSpecialized(Map* map,
Scavenger Heap::GetScavenger(int instance_type, int instance_size) { HeapObject** slot,
if (instance_type < FIRST_NONSTRING_TYPE) { HeapObject* object) {
switch (instance_type & kStringRepresentationMask) { EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
case kSeqStringTag:
if ((instance_type & kStringEncodingMask) == kAsciiStringTag) {
return &EvacuateSeqAsciiString;
} else {
return &EvacuateSeqTwoByteString;
}
case kConsStringTag:
if (IsShortcutCandidate(instance_type)) {
return &EvacuateShortcutCandidate;
} else {
ASSERT(instance_size == ConsString::kSize);
return GetScavengerForSize(ConsString::kSize, POINTER_OBJECT);
}
case kExternalStringTag:
ASSERT(instance_size == ExternalString::kSize);
return GetScavengerForSize(ExternalString::kSize, DATA_OBJECT);
} }
UNREACHABLE();
}
switch (instance_type) { static inline void Visit(Map* map,
case BYTE_ARRAY_TYPE: HeapObject** slot,
return reinterpret_cast<Scavenger>(&EvacuateByteArray); HeapObject* object) {
int object_size = map->instance_size();
case FIXED_ARRAY_TYPE: EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
return reinterpret_cast<Scavenger>(&EvacuateFixedArray); }
};
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_VALUE_TYPE:
case JS_ARRAY_TYPE:
case JS_REGEXP_TYPE:
case JS_FUNCTION_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_BUILTINS_OBJECT_TYPE:
return GetScavengerForSize(instance_size, POINTER_OBJECT);
case ODDBALL_TYPE:
return NULL;
case PROXY_TYPE:
return GetScavengerForSize(Proxy::kSize, DATA_OBJECT);
case MAP_TYPE: typedef void (*Callback)(Map* map, HeapObject** slot, HeapObject* object);
return NULL;
case CODE_TYPE: static VisitorDispatchTable<Callback> table_;
return NULL; };
case JS_GLOBAL_PROPERTY_CELL_TYPE:
return NULL;
case HEAP_NUMBER_TYPE: VisitorDispatchTable<ScavengingVisitor::Callback> ScavengingVisitor::table_;
case FILLER_TYPE:
case PIXEL_ARRAY_TYPE:
case EXTERNAL_BYTE_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
case EXTERNAL_SHORT_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
case EXTERNAL_INT_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
case EXTERNAL_FLOAT_ARRAY_TYPE:
return GetScavengerForSize(instance_size, DATA_OBJECT);
case SHARED_FUNCTION_INFO_TYPE:
return GetScavengerForSize(SharedFunctionInfo::kAlignedSize,
POINTER_OBJECT);
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
return GetScavengerForSize(instance_size, POINTER_OBJECT);
default:
UNREACHABLE();
return NULL;
}
}
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
@@ -1388,7 +1313,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
   MapWord first_word = object->map_word();
   ASSERT(!first_word.IsForwardingAddress());
   Map* map = first_word.ToMap();
-  map->Scavenge(p, object);
+  ScavengingVisitor::Scavenge(map, p, object);
 }
@@ -1407,7 +1332,8 @@ Object* Heap::AllocatePartialMap(InstanceType instance_type,
   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
   reinterpret_cast<Map*>(result)->
-      set_scavenger(GetScavenger(instance_type, instance_size));
+      set_visitor_id(
+          StaticVisitorBase::GetVisitorId(instance_type, instance_size));
   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
@@ -1424,7 +1350,8 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
   Map* map = reinterpret_cast<Map*>(result);
   map->set_map(meta_map());
   map->set_instance_type(instance_type);
-  map->set_scavenger(GetScavenger(instance_type, instance_size));
+  map->set_visitor_id(
+      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
   map->set_prototype(null_value());
   map->set_constructor(null_value());
   map->set_instance_size(instance_size);
@@ -2452,39 +2379,61 @@ class FlushingStackVisitor : public ThreadVisitor {
 };
 
-static void FlushCodeForFunction(SharedFunctionInfo* function_info) {
+static bool CodeIsActive(Code* code) {
+  // Make sure we are not referencing the code from the stack.
+  for (StackFrameIterator it; !it.done(); it.Advance()) {
+    if (code->contains(it.frame()->pc())) return true;
+  }
+  // Iterate the archived stacks in all threads to check if
+  // the code is referenced.
+  FlushingStackVisitor threadvisitor(code);
+  ThreadManager::IterateArchivedThreads(&threadvisitor);
+  if (threadvisitor.FoundCode()) return true;
+  return false;
+}
+
+
+static void FlushCodeForFunction(JSFunction* function) {
+  SharedFunctionInfo* shared_info = function->shared();
+
+  // Special handling if the function and shared info objects
+  // have different code objects.
+  if (function->code() != shared_info->code()) {
+    // If the shared function has been flushed but the function has not,
+    // we flush the function if possible.
+    if (!shared_info->is_compiled() && function->is_compiled() &&
+        !CodeIsActive(function->code())) {
+      function->set_code(shared_info->code());
+    }
+    return;
+  }
+
   // The function must be compiled and have the source code available,
   // to be able to recompile it in case we need the function again.
-  if (!(function_info->is_compiled() && function_info->HasSourceCode())) return;
+  if (!(shared_info->is_compiled() && shared_info->HasSourceCode())) return;
 
   // We never flush code for Api functions.
-  if (function_info->IsApiFunction()) return;
+  if (shared_info->IsApiFunction()) return;
 
   // Only flush code for functions.
-  if (!function_info->code()->kind() == Code::FUNCTION) return;
+  if (!shared_info->code()->kind() == Code::FUNCTION) return;
 
   // Function must be lazy compilable.
-  if (!function_info->allows_lazy_compilation()) return;
+  if (!shared_info->allows_lazy_compilation()) return;
 
   // If this is a full script wrapped in a function we do no flush the code.
-  if (function_info->is_toplevel()) return;
+  if (shared_info->is_toplevel()) return;
 
   // If this function is in the compilation cache we do not flush the code.
-  if (CompilationCache::HasFunction(function_info)) return;
+  if (CompilationCache::HasFunction(shared_info)) return;
 
-  // Make sure we are not referencing the code from the stack.
-  for (StackFrameIterator it; !it.done(); it.Advance()) {
-    if (function_info->code()->contains(it.frame()->pc())) return;
-  }
-  // Iterate the archived stacks in all threads to check if
-  // the code is referenced.
-  FlushingStackVisitor threadvisitor(function_info->code());
-  ThreadManager::IterateArchivedThreads(&threadvisitor);
-  if (threadvisitor.FoundCode()) return;
+  // Check stack and archived threads for the code.
+  if (CodeIsActive(shared_info->code())) return;
 
   // Compute the lazy compilable version of the code.
-  HandleScope scope;
-  function_info->set_code(*ComputeLazyCompile(function_info->length()));
+  Code* code = Builtins::builtin(Builtins::LazyCompile);
+  shared_info->set_code(code);
+  function->set_code(code);
 }
@@ -2496,12 +2445,12 @@ void Heap::FlushCode() {
   HeapObjectIterator it(old_pointer_space());
   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
     if (obj->IsJSFunction()) {
-      JSFunction* jsfunction = JSFunction::cast(obj);
+      JSFunction* function = JSFunction::cast(obj);
 
       // The function must have a valid context and not be a builtin.
-      if (jsfunction->unchecked_context()->IsContext() &&
-          !jsfunction->IsBuiltin()) {
-        FlushCodeForFunction(jsfunction->shared());
+      if (function->unchecked_context()->IsContext() &&
+          !function->IsBuiltin()) {
+        FlushCodeForFunction(function);
       }
     }
   }
@@ -2651,6 +2600,7 @@ Object* Heap::InitializeFunction(JSFunction* function,
   function->initialize_properties();
   function->initialize_elements();
   function->set_shared(shared);
+  function->set_code(shared->code());
   function->set_prototype_or_initial_map(prototype);
   function->set_context(undefined_value());
   function->set_literals(empty_fixed_array());
@@ -4000,7 +3950,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
   v->Synchronize("strong_root_list");
 
-  v->VisitPointer(BitCast<Object**, String**>(&hidden_symbol_));
+  v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
   v->Synchronize("symbol");
 
   Bootstrapper::Iterate(v);
@@ -4126,6 +4076,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
   *stats->memory_allocator_size = MemoryAllocator::Size();
   *stats->memory_allocator_capacity =
       MemoryAllocator::Size() + MemoryAllocator::Available();
+  *stats->os_error = OS::GetLastError();
   if (take_snapshot) {
     HeapIterator iterator;
     for (HeapObject* obj = iterator.next();
@@ -4174,6 +4125,10 @@ bool Heap::Setup(bool create_heap_objects) {
     if (!ConfigureHeapDefault()) return false;
   }
 
+  ScavengingVisitor::Initialize();
+  NewSpaceScavenger::Initialize();
+  MarkCompactCollector::Initialize();
+
   // Setup memory allocator and reserve a chunk of memory for new
   // space. The chunk is double the size of the requested reserved
   // new space size to ensure that we can find a pair of semispaces that
@@ -4858,6 +4813,7 @@ GCTracer::~GCTracer() {
     PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
     PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
     PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
+    PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
     PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
     PrintF("flushcode=%d ", static_cast<int>(scopes_[Scope::MC_FLUSH_CODE]));

6
deps/v8/src/heap.h

@@ -983,8 +983,6 @@ class Heap : public AllStatic {
   static void RecordStats(HeapStats* stats, bool take_snapshot = false);
 
-  static Scavenger GetScavenger(int instance_type, int instance_size);
-
   // Copy block of memory from src to dst. Size of block should be aligned
   // by pointer size.
   static inline void CopyBlock(Address dst, Address src, int byte_size);
@@ -1347,7 +1345,8 @@ class HeapStats {
   int* memory_allocator_capacity;  // 20
   int* objects_per_type;           // 21
   int* size_per_type;              // 22
-  int* end_marker;                 // 23
+  int* os_error;                   // 23
+  int* end_marker;                 // 24
 };
@@ -1725,6 +1724,7 @@ class GCTracer BASE_EMBEDDED {
       EXTERNAL,
       MC_MARK,
       MC_SWEEP,
+      MC_SWEEP_NEWSPACE,
       MC_COMPACT,
       MC_FLUSH_CODE,
       kNumberOfScopes

24
deps/v8/src/ia32/assembler-ia32-inl.h

@@ -183,6 +183,30 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
 }
 
+template<typename StaticVisitor>
+void RelocInfo::Visit() {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    StaticVisitor::VisitPointer(target_object_address());
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    StaticVisitor::VisitCodeTarget(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    StaticVisitor::VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  } else if (Debug::has_break_points() &&
+             ((RelocInfo::IsJSReturn(mode) &&
+              IsPatchedReturnSequence()) ||
+             (RelocInfo::IsDebugBreakSlot(mode) &&
+              IsPatchedDebugBreakSlotSequence()))) {
+    StaticVisitor::VisitDebugTarget(this);
+#endif
+  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+    StaticVisitor::VisitRuntimeEntry(this);
+  }
+}
+
 Immediate::Immediate(int x) {
   x_ = x;
   rmode_ = RelocInfo::NONE;
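
The new templated RelocInfo::Visit<StaticVisitor>() dispatches on the relocation mode to static member functions of the visitor type, so no virtual calls are involved. A hedged, self-contained sketch of that static-visitor pattern with illustrative stand-in types (not V8's RelocInfo):

// Static-visitor pattern: the visitor is a type parameter with static
// member functions, so the calls can be inlined and counted cheaply.
#include <cstdio>

enum Mode { EMBEDDED_OBJECT, CODE_TARGET, RUNTIME_ENTRY };

struct RelocEntry { Mode mode; void* target; };

template <typename StaticVisitor>
void VisitEntry(RelocEntry* entry) {
  if (entry->mode == EMBEDDED_OBJECT) {
    StaticVisitor::VisitPointer(&entry->target);
  } else if (entry->mode == CODE_TARGET) {
    StaticVisitor::VisitCodeTarget(entry);
  } else {
    StaticVisitor::VisitRuntimeEntry(entry);
  }
}

struct CountingVisitor {
  static int pointers, code_targets, runtime_entries;
  static void VisitPointer(void**) { ++pointers; }
  static void VisitCodeTarget(RelocEntry*) { ++code_targets; }
  static void VisitRuntimeEntry(RelocEntry*) { ++runtime_entries; }
};
int CountingVisitor::pointers = 0;
int CountingVisitor::code_targets = 0;
int CountingVisitor::runtime_entries = 0;

int main() {
  RelocEntry entries[] = {{EMBEDDED_OBJECT, nullptr}, {CODE_TARGET, nullptr}};
  for (RelocEntry& e : entries) VisitEntry<CountingVisitor>(&e);
  std::printf("pointers=%d code=%d\n",
              CountingVisitor::pointers, CountingVisitor::code_targets);
}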

15
deps/v8/src/ia32/assembler-ia32.cc

@@ -1142,6 +1142,21 @@ void Assembler::rcl(Register dst, uint8_t imm8) {
 }
 
+void Assembler::rcr(Register dst, uint8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint5(imm8));  // illegal shift count
+  if (imm8 == 1) {
+    EMIT(0xD1);
+    EMIT(0xD8 | dst.code());
+  } else {
+    EMIT(0xC1);
+    EMIT(0xD8 | dst.code());
+    EMIT(imm8);
+  }
+}
+
 void Assembler::sar(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
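
For reference, the bytes Assembler::rcr emits follow the usual ia32 encoding (D1 /3 for a shift count of one, C1 /3 ib otherwise). A small stand-alone check of the expected sequences, assuming dst.code() is the conventional register number (eax = 0, ecx = 1, ...):

// Prints the byte sequences the rcr emitter above should produce.
#include <cstdio>

void PrintRcrEncoding(int reg_code, unsigned imm8) {
  if (imm8 == 1) {
    // rcr r32, 1  ->  D1 /3
    std::printf("rcr r%d, 1   : D1 %02X\n", reg_code, 0xD8 | reg_code);
  } else {
    // rcr r32, imm8  ->  C1 /3 ib
    std::printf("rcr r%d, %-2u : C1 %02X %02X\n",
                reg_code, imm8, 0xD8 | reg_code, imm8);
  }
}

int main() {
  PrintRcrEncoding(0, 1);   // rcr eax, 1  -> D1 D8
  PrintRcrEncoding(1, 5);   // rcr ecx, 5  -> C1 D9 05
}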

1
deps/v8/src/ia32/assembler-ia32.h

@@ -625,6 +625,7 @@ class Assembler : public Malloced {
   void or_(const Operand& dst, const Immediate& x);
 
   void rcl(Register dst, uint8_t imm8);
+  void rcr(Register dst, uint8_t imm8);
   void sar(Register dst, uint8_t imm8);
   void sar_cl(Register dst);
22
deps/v8/src/ia32/builtins-ia32.cc

@@ -429,6 +429,26 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
 }
 
+void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Push a copy of the function onto the stack.
+  __ push(edi);
+
+  __ push(edi);  // Function is also the parameter to the runtime call.
+  __ CallRuntime(Runtime::kLazyCompile, 1);
+  __ pop(edi);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
+  __ jmp(Operand(ecx));
+}
+
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 1. Make sure we have at least one argument.
   { Label done;
@@ -548,7 +568,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   __ mov(ebx,
          FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
   __ SmiUntag(ebx);
-  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+  __ mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
   __ cmp(eax, Operand(ebx));
   __ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
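
Generate_LazyCompile is the stub a not-yet-compiled function's code field points at: it calls the runtime compiler and then tail-calls the resulting code. A toy sketch of that trampoline idea using plain function pointers (our own types, not V8 code):

// First call hits the stub, which "compiles", patches the pointer, and
// forwards the call; later calls go straight to the real body.
#include <cstdio>

struct Function;
typedef int (*Code)(Function* fn, int arg);

struct Function {
  Code code;  // starts out pointing at the lazy-compile stub
};

static int RealBody(Function*, int arg) { return arg * 2; }

static int LazyCompileStub(Function* fn, int arg) {
  std::printf("compiling on first call\n");
  fn->code = &RealBody;      // "patch" the function to its compiled code
  return fn->code(fn, arg);  // tail-call the freshly installed code
}

int main() {
  Function fn = {&LazyCompileStub};
  std::printf("%d\n", fn.code(&fn, 21));  // compiles, then runs
  std::printf("%d\n", fn.code(&fn, 21));  // runs directly
}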

722
deps/v8/src/ia32/codegen-ia32.cc

@ -202,105 +202,92 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// esi: callee's context // esi: callee's context
allocator_->Initialize(); allocator_->Initialize();
if (info->mode() == CompilationInfo::PRIMARY) { frame_->Enter();
frame_->Enter();
// Allocate space for locals and initialize them.
// Allocate space for locals and initialize them. frame_->AllocateStackSlots();
frame_->AllocateStackSlots();
// Allocate the local context if needed.
// Allocate the local context if needed. int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) {
if (heap_slots > 0) { Comment cmnt(masm_, "[ allocate local context");
Comment cmnt(masm_, "[ allocate local context"); // Allocate local context.
// Allocate local context. // Get outer context and create a new context based on it.
// Get outer context and create a new context based on it. frame_->PushFunction();
frame_->PushFunction(); Result context;
Result context; if (heap_slots <= FastNewContextStub::kMaximumSlots) {
if (heap_slots <= FastNewContextStub::kMaximumSlots) { FastNewContextStub stub(heap_slots);
FastNewContextStub stub(heap_slots); context = frame_->CallStub(&stub, 1);
context = frame_->CallStub(&stub, 1); } else {
} else { context = frame_->CallRuntime(Runtime::kNewContext, 1);
context = frame_->CallRuntime(Runtime::kNewContext, 1); }
}
// Update context local. // Update context local.
frame_->SaveContextRegister(); frame_->SaveContextRegister();
// Verify that the runtime call result and esi agree. // Verify that the runtime call result and esi agree.
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ cmp(context.reg(), Operand(esi)); __ cmp(context.reg(), Operand(esi));
__ Assert(equal, "Runtime::NewContext should end up in esi"); __ Assert(equal, "Runtime::NewContext should end up in esi");
}
} }
}
// TODO(1241774): Improve this code: // TODO(1241774): Improve this code:
// 1) only needed if we have a context // 1) only needed if we have a context
// 2) no need to recompute context ptr every single time // 2) no need to recompute context ptr every single time
// 3) don't copy parameter operand code from SlotOperand! // 3) don't copy parameter operand code from SlotOperand!
{ {
Comment cmnt2(masm_, "[ copy context parameters into .context"); Comment cmnt2(masm_, "[ copy context parameters into .context");
// Note that iteration order is relevant here! If we have the same // Note that iteration order is relevant here! If we have the same
// parameter twice (e.g., function (x, y, x)), and that parameter // parameter twice (e.g., function (x, y, x)), and that parameter
// needs to be copied into the context, it must be the last argument // needs to be copied into the context, it must be the last argument
// passed to the parameter that needs to be copied. This is a rare // passed to the parameter that needs to be copied. This is a rare
// case so we don't check for it, instead we rely on the copying // case so we don't check for it, instead we rely on the copying
// order: such a parameter is copied repeatedly into the same // order: such a parameter is copied repeatedly into the same
// context location and thus the last value is what is seen inside // context location and thus the last value is what is seen inside
// the function. // the function.
for (int i = 0; i < scope()->num_parameters(); i++) { for (int i = 0; i < scope()->num_parameters(); i++) {
Variable* par = scope()->parameter(i); Variable* par = scope()->parameter(i);
Slot* slot = par->slot(); Slot* slot = par->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) { if (slot != NULL && slot->type() == Slot::CONTEXT) {
// The use of SlotOperand below is safe in unspilled code // The use of SlotOperand below is safe in unspilled code
// because the slot is guaranteed to be a context slot. // because the slot is guaranteed to be a context slot.
// //
// There are no parameters in the global scope. // There are no parameters in the global scope.
ASSERT(!scope()->is_global_scope()); ASSERT(!scope()->is_global_scope());
frame_->PushParameterAt(i); frame_->PushParameterAt(i);
Result value = frame_->Pop(); Result value = frame_->Pop();
value.ToRegister(); value.ToRegister();
// SlotOperand loads context.reg() with the context object // SlotOperand loads context.reg() with the context object
// stored to, used below in RecordWrite. // stored to, used below in RecordWrite.
Result context = allocator_->Allocate(); Result context = allocator_->Allocate();
ASSERT(context.is_valid()); ASSERT(context.is_valid());
__ mov(SlotOperand(slot, context.reg()), value.reg()); __ mov(SlotOperand(slot, context.reg()), value.reg());
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
Result scratch = allocator_->Allocate(); Result scratch = allocator_->Allocate();
ASSERT(scratch.is_valid()); ASSERT(scratch.is_valid());
frame_->Spill(context.reg()); frame_->Spill(context.reg());
frame_->Spill(value.reg()); frame_->Spill(value.reg());
__ RecordWrite(context.reg(), offset, value.reg(), scratch.reg()); __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
}
} }
} }
}
// Store the arguments object. This must happen after context // Store the arguments object. This must happen after context
// initialization because the arguments object may be stored in // initialization because the arguments object may be stored in
// the context. // the context.
if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) { if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
StoreArgumentsObject(true); StoreArgumentsObject(true);
} }
// Initialize ThisFunction reference if present.
if (scope()->is_function_scope() && scope()->function() != NULL) {
frame_->Push(Factory::the_hole_value());
StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
}
} else {
// When used as the secondary compiler for splitting, ebp, esi,
// and edi have been pushed on the stack. Adjust the virtual
// frame to match this state.
frame_->Adjust(3);
allocator_->Unuse(edi);
// Bind all the bailout labels to the beginning of the function. // Initialize ThisFunction reference if present.
List<CompilationInfo::Bailout*>* bailouts = info->bailouts(); if (scope()->is_function_scope() && scope()->function() != NULL) {
for (int i = 0; i < bailouts->length(); i++) { frame_->Push(Factory::the_hole_value());
__ bind(bailouts->at(i)->label()); StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
}
} }
// Initialize the function return target after the locals are set // Initialize the function return target after the locals are set
// up, because it needs the expected frame height from the frame. // up, because it needs the expected frame height from the frame.
function_return_.set_direction(JumpTarget::BIDIRECTIONAL); function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
@@ -1038,7 +1025,11 @@ const char* GenericBinaryOpStub::GetName() {
 }
 
-// Call the specialized stub for a binary operation.
+// Perform or call the specialized stub for a binary operation. Requires the
+// three registers left, right and dst to be distinct and spilled. This
+// deferred operation has up to three entry points: The main one calls the
+// runtime system. The second is for when the result is a non-Smi. The
+// third is for when at least one of the inputs is non-Smi and we have SSE2.
 class DeferredInlineBinaryOperation: public DeferredCode {
  public:
   DeferredInlineBinaryOperation(Token::Value op,
@@ -1051,11 +1042,23 @@ class DeferredInlineBinaryOperation: public DeferredCode {
       : op_(op), dst_(dst), left_(left), right_(right),
         left_info_(left_info), right_info_(right_info), mode_(mode) {
     set_comment("[ DeferredInlineBinaryOperation");
+    ASSERT(!left.is(right));
   }
 
   virtual void Generate();
 
+  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
+  // Exit().
+  virtual bool AutoSaveAndRestore() { return false; }
+
+  void JumpToAnswerOutOfRange(Condition cond);
+  void JumpToConstantRhs(Condition cond, Smi* smi_value);
+  Label* NonSmiInputLabel();
+
  private:
+  void GenerateAnswerOutOfRange();
+  void GenerateNonSmiInput();
+
   Token::Value op_;
   Register dst_;
   Register left_;
@@ -1063,15 +1066,42 @@ class DeferredInlineBinaryOperation: public DeferredCode {
   TypeInfo left_info_;
   TypeInfo right_info_;
   OverwriteMode mode_;
+  Label answer_out_of_range_;
+  Label non_smi_input_;
+  Label constant_rhs_;
+  Smi* smi_value_;
 };
 
+
+Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
+  if (Token::IsBitOp(op_) && CpuFeatures::IsSupported(SSE2)) {
+    return &non_smi_input_;
+  } else {
+    return entry_label();
+  }
+}
+
+
+void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) {
+  __ j(cond, &answer_out_of_range_);
+}
+
+
+void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond,
+                                                      Smi* smi_value) {
+  smi_value_ = smi_value;
+  __ j(cond, &constant_rhs_);
+}
+
void DeferredInlineBinaryOperation::Generate() { void DeferredInlineBinaryOperation::Generate() {
Label done; // Registers are not saved implicitly for this stub, so we should not
if (CpuFeatures::IsSupported(SSE2) && ((op_ == Token::ADD) || // tread on the registers that were not passed to us.
(op_ ==Token::SUB) || if (CpuFeatures::IsSupported(SSE2) &&
(op_ == Token::MUL) || ((op_ == Token::ADD) ||
(op_ == Token::DIV))) { (op_ == Token::SUB) ||
(op_ == Token::MUL) ||
(op_ == Token::DIV))) {
CpuFeatures::Scope use_sse2(SSE2); CpuFeatures::Scope use_sse2(SSE2);
Label call_runtime, after_alloc_failure; Label call_runtime, after_alloc_failure;
Label left_smi, right_smi, load_right, do_op; Label left_smi, right_smi, load_right, do_op;
@ -1131,7 +1161,6 @@ void DeferredInlineBinaryOperation::Generate() {
__ cvtsi2sd(xmm1, Operand(right_)); __ cvtsi2sd(xmm1, Operand(right_));
__ SmiTag(right_); __ SmiTag(right_);
if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) { if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
Label alloc_failure;
__ push(left_); __ push(left_);
__ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure); __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
__ pop(left_); __ pop(left_);
@ -1146,19 +1175,200 @@ void DeferredInlineBinaryOperation::Generate() {
default: UNREACHABLE(); default: UNREACHABLE();
} }
__ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0); __ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
__ jmp(&done); Exit();
__ bind(&after_alloc_failure); __ bind(&after_alloc_failure);
__ pop(left_); __ pop(left_);
__ bind(&call_runtime); __ bind(&call_runtime);
} }
// Register spilling is not done implicitly for this stub.
// We can't postpone it any more now though.
SaveRegisters();
GenericBinaryOpStub stub(op_, GenericBinaryOpStub stub(op_,
mode_, mode_,
NO_SMI_CODE_IN_STUB, NO_SMI_CODE_IN_STUB,
TypeInfo::Combine(left_info_, right_info_)); TypeInfo::Combine(left_info_, right_info_));
stub.GenerateCall(masm_, left_, right_); stub.GenerateCall(masm_, left_, right_);
if (!dst_.is(eax)) __ mov(dst_, eax); if (!dst_.is(eax)) __ mov(dst_, eax);
__ bind(&done); RestoreRegisters();
Exit();
if (non_smi_input_.is_linked() || constant_rhs_.is_linked()) {
GenerateNonSmiInput();
}
if (answer_out_of_range_.is_linked()) {
GenerateAnswerOutOfRange();
}
}
void DeferredInlineBinaryOperation::GenerateNonSmiInput() {
// We know at least one of the inputs was not a Smi.
// This is a third entry point into the deferred code.
// We may not overwrite left_ because we want to be able
// to call the handling code for non-smi answer and it
// might want to overwrite the heap number in left_.
ASSERT(!right_.is(dst_));
ASSERT(!left_.is(dst_));
ASSERT(!left_.is(right_));
// This entry point is used for bit ops where the right hand side
// is a constant Smi and the left hand side is a heap object. It
// is also used for bit ops where both sides are unknown, but where
// at least one of them is a heap object.
bool rhs_is_constant = constant_rhs_.is_linked();
// We can't generate code for both cases.
ASSERT(!non_smi_input_.is_linked() || !constant_rhs_.is_linked());
if (FLAG_debug_code) {
__ int3(); // We don't fall through into this code.
}
__ bind(&non_smi_input_);
if (rhs_is_constant) {
__ bind(&constant_rhs_);
// In this case the input is a heap object and it is in the dst_ register.
// The left_ and right_ registers have not been initialized yet.
__ mov(right_, Immediate(smi_value_));
__ mov(left_, Operand(dst_));
if (!CpuFeatures::IsSupported(SSE2)) {
__ jmp(entry_label());
return;
} else {
CpuFeatures::Scope use_sse2(SSE2);
__ JumpIfNotNumber(dst_, left_info_, entry_label());
__ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
__ SmiUntag(right_);
}
} else {
// We know we have SSE2 here because otherwise the label is not linked (see
// NonSmiInputLabel).
CpuFeatures::Scope use_sse2(SSE2);
// Handle the non-constant right hand side situation:
if (left_info_.IsSmi()) {
// Right is a heap object.
__ JumpIfNotNumber(right_, right_info_, entry_label());
__ ConvertToInt32(right_, right_, dst_, right_info_, entry_label());
__ mov(dst_, Operand(left_));
__ SmiUntag(dst_);
} else if (right_info_.IsSmi()) {
// Left is a heap object.
__ JumpIfNotNumber(left_, left_info_, entry_label());
__ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
__ SmiUntag(right_);
} else {
// Here we don't know if it's one or both that is a heap object.
Label only_right_is_heap_object, got_both;
__ mov(dst_, Operand(left_));
__ SmiUntag(dst_, &only_right_is_heap_object);
// Left was a heap object.
__ JumpIfNotNumber(left_, left_info_, entry_label());
__ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
__ SmiUntag(right_, &got_both);
// Both were heap objects.
__ rcl(right_, 1); // Put tag back.
__ JumpIfNotNumber(right_, right_info_, entry_label());
__ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
__ jmp(&got_both);
__ bind(&only_right_is_heap_object);
__ JumpIfNotNumber(right_, right_info_, entry_label());
__ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
__ bind(&got_both);
}
}
ASSERT(op_ == Token::BIT_AND ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_XOR ||
right_.is(ecx));
switch (op_) {
case Token::BIT_AND: __ and_(dst_, Operand(right_)); break;
case Token::BIT_OR: __ or_(dst_, Operand(right_)); break;
case Token::BIT_XOR: __ xor_(dst_, Operand(right_)); break;
case Token::SHR: __ shr_cl(dst_); break;
case Token::SAR: __ sar_cl(dst_); break;
case Token::SHL: __ shl_cl(dst_); break;
default: UNREACHABLE();
}
if (op_ == Token::SHR) {
// Check that the *unsigned* result fits in a smi. Neither of
// the two high-order bits can be set:
// * 0x80000000: high bit would be lost when smi tagging.
// * 0x40000000: this number would convert to negative when smi
// tagging.
__ test(dst_, Immediate(0xc0000000));
__ j(not_zero, &answer_out_of_range_);
} else {
// Check that the *signed* result fits in a smi.
__ cmp(dst_, 0xc0000000);
__ j(negative, &answer_out_of_range_);
}
__ SmiTag(dst_);
Exit();
}
void DeferredInlineBinaryOperation::GenerateAnswerOutOfRange() {
Label after_alloc_failure2;
Label allocation_ok;
__ bind(&after_alloc_failure2);
// We have to allocate a number, causing a GC, while keeping hold of
// the answer in dst_. The answer is not a Smi. We can't just call the
// runtime shift function here because we already threw away the inputs.
__ xor_(left_, Operand(left_));
__ shl(dst_, 1); // Put top bit in carry flag and Smi tag the low bits.
__ rcr(left_, 1); // Rotate with carry.
__ push(dst_); // Smi tagged low 31 bits.
__ push(left_); // 0 or 0x80000000, which is Smi tagged in both cases.
__ CallRuntime(Runtime::kNumberAlloc, 0);
if (!left_.is(eax)) {
__ mov(left_, eax);
}
__ pop(right_); // High bit.
__ pop(dst_); // Low 31 bits.
__ shr(dst_, 1); // Put 0 in top bit.
__ or_(dst_, Operand(right_));
__ jmp(&allocation_ok);
// This is the second entry point to the deferred code. It is used only by
// the bit operations.
// The dst_ register has the answer. It is not Smi tagged. If mode_ is
// OVERWRITE_LEFT then left_ must contain either an overwritable heap number
// or a Smi.
// Put a heap number pointer in left_.
__ bind(&answer_out_of_range_);
SaveRegisters();
if (mode_ == OVERWRITE_LEFT) {
__ test(left_, Immediate(kSmiTagMask));
__ j(not_zero, &allocation_ok);
}
// This trashes right_.
__ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
__ bind(&allocation_ok);
if (CpuFeatures::IsSupported(SSE2) && op_ != Token::SHR) {
CpuFeatures::Scope use_sse2(SSE2);
ASSERT(Token::IsBitOp(op_));
// Signed conversion.
__ cvtsi2sd(xmm0, Operand(dst_));
__ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0);
} else {
if (op_ == Token::SHR) {
__ push(Immediate(0)); // High word of unsigned value.
__ push(dst_);
__ fild_d(Operand(esp, 0));
__ Drop(2);
} else {
ASSERT(Token::IsBitOp(op_));
__ push(dst_);
__ fild_s(Operand(esp, 0)); // Signed conversion.
__ pop(dst_);
}
__ fstp_d(FieldOperand(left_, HeapNumber::kValueOffset));
}
__ mov(dst_, left_);
RestoreRegisters();
Exit();
} }
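
The 0xc0000000 test above guards smi tagging of an unsigned shift result: on ia32 a smi carries only 31 signed bits, so both of the top two bits of the untagged value must be clear. A small worked check of that condition (our own code, not V8's):

// Mirrors the `test dst, 0xc0000000` check emitted for Token::SHR results.
#include <cstdint>
#include <cstdio>

const int32_t kSmiMax = (1 << 30) - 1;  // largest value an ia32 smi can hold

bool FitsUnsignedResultInSmi(uint32_t value) {
  // Same condition the generated code tests: both top bits must be clear.
  return (value & 0xc0000000u) == 0;
}

int main() {
  uint32_t ok = 0x3fffffffu;       // == kSmiMax, still representable
  uint32_t too_big = 0x40000000u;  // would read back as a negative smi
  uint32_t top_bit = 0x80000000u;  // high bit lost entirely when tagged
  std::printf("%x fits: %d\n", ok, FitsUnsignedResultInSmi(ok));
  std::printf("%x fits: %d\n", too_big, FitsUnsignedResultInSmi(too_big));
  std::printf("%x fits: %d\n", top_bit, FitsUnsignedResultInSmi(top_bit));
  std::printf("smi max = %d\n", kSmiMax);
  return 0;
}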
@ -1499,10 +1709,25 @@ void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
TypeInfo left_info, TypeInfo left_info,
TypeInfo right_info, TypeInfo right_info,
DeferredCode* deferred) { DeferredCode* deferred) {
JumpIfNotBothSmiUsingTypeInfo(left,
right,
scratch,
left_info,
right_info,
deferred->entry_label());
}
void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
Register right,
Register scratch,
TypeInfo left_info,
TypeInfo right_info,
Label* on_not_smi) {
if (left.is(right)) { if (left.is(right)) {
if (!left_info.IsSmi()) { if (!left_info.IsSmi()) {
__ test(left, Immediate(kSmiTagMask)); __ test(left, Immediate(kSmiTagMask));
deferred->Branch(not_zero); __ j(not_zero, on_not_smi);
} else { } else {
if (FLAG_debug_code) __ AbortIfNotSmi(left); if (FLAG_debug_code) __ AbortIfNotSmi(left);
} }
@ -1511,17 +1736,17 @@ void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
__ mov(scratch, left); __ mov(scratch, left);
__ or_(scratch, Operand(right)); __ or_(scratch, Operand(right));
__ test(scratch, Immediate(kSmiTagMask)); __ test(scratch, Immediate(kSmiTagMask));
deferred->Branch(not_zero); __ j(not_zero, on_not_smi);
} else { } else {
__ test(left, Immediate(kSmiTagMask)); __ test(left, Immediate(kSmiTagMask));
deferred->Branch(not_zero); __ j(not_zero, on_not_smi);
if (FLAG_debug_code) __ AbortIfNotSmi(right); if (FLAG_debug_code) __ AbortIfNotSmi(right);
} }
} else { } else {
if (FLAG_debug_code) __ AbortIfNotSmi(left); if (FLAG_debug_code) __ AbortIfNotSmi(left);
if (!right_info.IsSmi()) { if (!right_info.IsSmi()) {
__ test(right, Immediate(kSmiTagMask)); __ test(right, Immediate(kSmiTagMask));
deferred->Branch(not_zero); __ j(not_zero, on_not_smi);
} else { } else {
if (FLAG_debug_code) __ AbortIfNotSmi(right); if (FLAG_debug_code) __ AbortIfNotSmi(right);
} }
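Aside (not part of this commit's diff): the hunk above checks two operands with a single test by OR-ing them into a scratch register, which works because the smi tag occupies the low bit and is zero for smis. A small sketch of that trick:

#include <cstdint>
#include <cstdio>

constexpr uint32_t kSmiTagMask = 1;  // low bit; 0 means smi on ia32

bool BothAreSmis(uint32_t left, uint32_t right) {
  // If either word has its tag bit set, the OR does too.
  return ((left | right) & kSmiTagMask) == 0;
}

int main() {
  uint32_t smi_a = 4u << 1, smi_b = 7u << 1;      // tagged smis
  uint32_t heap_ptr = 0x12345679u;                // low bit set: heap-object tag
  std::printf("%d\n", BothAreSmis(smi_a, smi_b));    // 1
  std::printf("%d\n", BothAreSmis(smi_a, heap_ptr)); // 0
}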
@ -1606,13 +1831,16 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
right->ToRegister(); right->ToRegister();
frame_->Spill(eax); frame_->Spill(eax);
frame_->Spill(edx); frame_->Spill(edx);
// DeferredInlineBinaryOperation requires all the registers that it is
// told about to be spilled and distinct.
Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
// Check that left and right are smi tagged. // Check that left and right are smi tagged.
DeferredInlineBinaryOperation* deferred = DeferredInlineBinaryOperation* deferred =
new DeferredInlineBinaryOperation(op, new DeferredInlineBinaryOperation(op,
(op == Token::DIV) ? eax : edx, (op == Token::DIV) ? eax : edx,
left->reg(), left->reg(),
right->reg(), distinct_right.reg(),
left_type_info, left_type_info,
right_type_info, right_type_info,
overwrite_mode); overwrite_mode);
@ -1695,15 +1923,24 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left->ToRegister(); left->ToRegister();
ASSERT(left->is_register() && !left->reg().is(ecx)); ASSERT(left->is_register() && !left->reg().is(ecx));
ASSERT(right->is_register() && right->reg().is(ecx)); ASSERT(right->is_register() && right->reg().is(ecx));
if (left_type_info.IsSmi()) {
if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
}
if (right_type_info.IsSmi()) {
if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
}
// We will modify right, it must be spilled. // We will modify right, it must be spilled.
frame_->Spill(ecx); frame_->Spill(ecx);
// DeferredInlineBinaryOperation requires all the registers that it is told
// about to be spilled and distinct. We know that right is ecx and left is
// not ecx.
frame_->Spill(left->reg());
// Use a fresh answer register to avoid spilling the left operand. // Use a fresh answer register to avoid spilling the left operand.
answer = allocator_->Allocate(); answer = allocator_->Allocate();
ASSERT(answer.is_valid()); ASSERT(answer.is_valid());
// Check that both operands are smis using the answer register as a
// temporary.
DeferredInlineBinaryOperation* deferred = DeferredInlineBinaryOperation* deferred =
new DeferredInlineBinaryOperation(op, new DeferredInlineBinaryOperation(op,
answer.reg(), answer.reg(),
@ -1712,55 +1949,28 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
left_type_info, left_type_info,
right_type_info, right_type_info,
overwrite_mode); overwrite_mode);
JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
left_type_info, right_type_info,
deferred->NonSmiInputLabel());
Label do_op, left_nonsmi; // Untag both operands.
// If right is a smi we make a fast case if left is either a smi __ mov(answer.reg(), left->reg());
// or a heapnumber. __ SmiUntag(answer.reg());
if (CpuFeatures::IsSupported(SSE2) && right_type_info.IsSmi()) { __ SmiUntag(right->reg()); // Right is ecx.
CpuFeatures::Scope use_sse2(SSE2);
__ mov(answer.reg(), left->reg());
// Fast case - both are actually smis.
if (!left_type_info.IsSmi()) {
__ test(answer.reg(), Immediate(kSmiTagMask));
__ j(not_zero, &left_nonsmi);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
}
if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
__ SmiUntag(answer.reg());
__ jmp(&do_op);
__ bind(&left_nonsmi);
// Branch if not a heapnumber.
__ cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
Factory::heap_number_map());
deferred->Branch(not_equal);
// Load integer value into answer register using truncation.
__ cvttsd2si(answer.reg(),
FieldOperand(answer.reg(), HeapNumber::kValueOffset));
// Branch if we do not fit in a smi.
__ cmp(answer.reg(), 0xc0000000);
deferred->Branch(negative);
} else {
JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
left_type_info, right_type_info, deferred);
// Untag both operands.
__ mov(answer.reg(), left->reg());
__ SmiUntag(answer.reg());
}
__ bind(&do_op);
__ SmiUntag(ecx);
// Perform the operation. // Perform the operation.
ASSERT(right->reg().is(ecx));
switch (op) { switch (op) {
case Token::SAR: case Token::SAR: {
__ sar_cl(answer.reg()); __ sar_cl(answer.reg());
// No checks of result necessary if (!left_type_info.IsSmi()) {
// Check that the *signed* result fits in a smi.
__ cmp(answer.reg(), 0xc0000000);
deferred->JumpToAnswerOutOfRange(negative);
}
break; break;
}
case Token::SHR: { case Token::SHR: {
Label result_ok;
__ shr_cl(answer.reg()); __ shr_cl(answer.reg());
// Check that the *unsigned* result fits in a smi. Neither of // Check that the *unsigned* result fits in a smi. Neither of
// the two high-order bits can be set: // the two high-order bits can be set:
@ -1773,21 +1983,14 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
// case. The low bit of the left argument may be lost, but only // case. The low bit of the left argument may be lost, but only
// in a case where it is dropped anyway. // in a case where it is dropped anyway.
__ test(answer.reg(), Immediate(0xc0000000)); __ test(answer.reg(), Immediate(0xc0000000));
__ j(zero, &result_ok); deferred->JumpToAnswerOutOfRange(not_zero);
__ SmiTag(ecx);
deferred->Jump();
__ bind(&result_ok);
break; break;
} }
case Token::SHL: { case Token::SHL: {
Label result_ok;
__ shl_cl(answer.reg()); __ shl_cl(answer.reg());
// Check that the *signed* result fits in a smi. // Check that the *signed* result fits in a smi.
__ cmp(answer.reg(), 0xc0000000); __ cmp(answer.reg(), 0xc0000000);
__ j(positive, &result_ok); deferred->JumpToAnswerOutOfRange(negative);
__ SmiTag(ecx);
deferred->Jump();
__ bind(&result_ok);
break; break;
} }
default: default:
@ -1805,6 +2008,9 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
// Handle the other binary operations. // Handle the other binary operations.
left->ToRegister(); left->ToRegister();
right->ToRegister(); right->ToRegister();
// DeferredInlineBinaryOperation requires all the registers that it is told
// about to be spilled.
Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
// A newly allocated register answer is used to hold the answer. The // A newly allocated register answer is used to hold the answer. The
// registers containing left and right are not modified so they don't // registers containing left and right are not modified so they don't
// need to be spilled in the fast case. // need to be spilled in the fast case.
@ -1816,12 +2022,16 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
new DeferredInlineBinaryOperation(op, new DeferredInlineBinaryOperation(op,
answer.reg(), answer.reg(),
left->reg(), left->reg(),
right->reg(), distinct_right.reg(),
left_type_info, left_type_info,
right_type_info, right_type_info,
overwrite_mode); overwrite_mode);
JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(), Label non_smi_bit_op;
left_type_info, right_type_info, deferred); if (op != Token::BIT_OR) {
JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
left_type_info, right_type_info,
deferred->NonSmiInputLabel());
}
__ mov(answer.reg(), left->reg()); __ mov(answer.reg(), left->reg());
switch (op) { switch (op) {
@ -1864,6 +2074,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
case Token::BIT_OR: case Token::BIT_OR:
__ or_(answer.reg(), Operand(right->reg())); __ or_(answer.reg(), Operand(right->reg()));
__ test(answer.reg(), Immediate(kSmiTagMask));
__ j(not_zero, deferred->NonSmiInputLabel());
break; break;
case Token::BIT_AND: case Token::BIT_AND:
@ -1878,6 +2090,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
UNREACHABLE(); UNREACHABLE();
break; break;
} }
deferred->BindExit(); deferred->BindExit();
left->Unuse(); left->Unuse();
right->Unuse(); right->Unuse();
@ -2363,27 +2576,25 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
case Token::BIT_XOR: case Token::BIT_XOR:
case Token::BIT_AND: { case Token::BIT_AND: {
operand->ToRegister(); operand->ToRegister();
// DeferredInlineBinaryOperation requires all the registers that it is
// told about to be spilled.
frame_->Spill(operand->reg()); frame_->Spill(operand->reg());
DeferredCode* deferred = NULL; DeferredInlineBinaryOperation* deferred = NULL;
if (reversed) {
deferred =
new DeferredInlineSmiOperationReversed(op,
operand->reg(),
smi_value,
operand->reg(),
operand->type_info(),
overwrite_mode);
} else {
deferred = new DeferredInlineSmiOperation(op,
operand->reg(),
operand->reg(),
operand->type_info(),
smi_value,
overwrite_mode);
}
if (!operand->type_info().IsSmi()) { if (!operand->type_info().IsSmi()) {
Result left = allocator()->Allocate();
ASSERT(left.is_valid());
Result right = allocator()->Allocate();
ASSERT(right.is_valid());
deferred = new DeferredInlineBinaryOperation(
op,
operand->reg(),
left.reg(),
right.reg(),
operand->type_info(),
TypeInfo::Smi(),
overwrite_mode == NO_OVERWRITE ? NO_OVERWRITE : OVERWRITE_LEFT);
__ test(operand->reg(), Immediate(kSmiTagMask)); __ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero); deferred->JumpToConstantRhs(not_zero, smi_value);
} else if (FLAG_debug_code) { } else if (FLAG_debug_code) {
__ AbortIfNotSmi(operand->reg()); __ AbortIfNotSmi(operand->reg());
} }
@ -2399,7 +2610,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
__ or_(Operand(operand->reg()), Immediate(value)); __ or_(Operand(operand->reg()), Immediate(value));
} }
} }
deferred->BindExit(); if (deferred != NULL) deferred->BindExit();
answer = *operand; answer = *operand;
break; break;
} }
@ -3212,10 +3423,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
__ j(zero, &build_args); __ j(zero, &build_args);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx); __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &build_args); __ j(not_equal, &build_args);
__ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply)); Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
__ cmp(FieldOperand(ecx, SharedFunctionInfo::kCodeOffset), __ cmp(FieldOperand(eax, JSFunction::kCodeOffset), Immediate(apply_code));
Immediate(apply_code));
__ j(not_equal, &build_args); __ j(not_equal, &build_args);
// Check that applicand is a function. // Check that applicand is a function.
@ -4389,7 +4598,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ mov(ebx, Operand(eax)); __ mov(ebx, Operand(eax));
// If the property has been removed while iterating, we just skip it. // If the property has been removed while iterating, we just skip it.
__ cmp(ebx, Factory::null_value()); __ test(ebx, Operand(ebx));
node->continue_target()->Branch(equal); node->continue_target()->Branch(equal);
end_del_check.Bind(); end_del_check.Bind();
@ -4397,10 +4606,11 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// loop. edx: i'th entry of the enum cache (or string there of) // loop. edx: i'th entry of the enum cache (or string there of)
frame_->EmitPush(ebx); frame_->EmitPush(ebx);
{ Reference each(this, node->each()); { Reference each(this, node->each());
// Loading a reference may leave the frame in an unspilled state.
frame_->SpillAll();
if (!each.is_illegal()) { if (!each.is_illegal()) {
if (each.size() > 0) { if (each.size() > 0) {
// Loading a reference may leave the frame in an unspilled state.
frame_->SpillAll();
// Get the value (under the reference on the stack) from memory.
frame_->EmitPush(frame_->ElementAt(each.size())); frame_->EmitPush(frame_->ElementAt(each.size()));
each.SetValue(NOT_CONST_INIT); each.SetValue(NOT_CONST_INIT);
frame_->Drop(2); frame_->Drop(2);
@ -6539,7 +6749,7 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
} }
void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) { void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
// This generates a fast version of: // This generates a fast version of:
// (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' || // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
// typeof(arg) == function). // typeof(arg) == function).
@ -6560,6 +6770,143 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
} }
// Deferred code to check whether the String JavaScript wrapper object is safe
// to use the default valueOf on. This code is called after the bit caching
// this information in the map has been checked, with the map for the object in
// the map_result_ register. On return the register map_result_ contains 1 for
// true and 0 for false.
class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
public:
DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
Register map_result,
Register scratch1,
Register scratch2)
: object_(object),
map_result_(map_result),
scratch1_(scratch1),
scratch2_(scratch2) { }
virtual void Generate() {
Label false_result;
// Check that map is loaded as expected.
if (FLAG_debug_code) {
__ cmp(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
__ Assert(equal, "Map not in expected register");
}
// Check for fast case object. Generate false result for slow case object.
__ mov(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
__ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
__ cmp(scratch1_, Factory::hash_table_map());
__ j(equal, &false_result);
// Look for valueOf symbol in the descriptor array, and indicate false if
// found. The type is not checked, so if it is a transition it is a false
// negative.
__ mov(map_result_,
FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
__ mov(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
// map_result_: descriptor array
// scratch1_: length of descriptor array
// Calculate the end of the descriptor array.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kPointerSize == 4);
__ lea(scratch1_,
Operand(map_result_, scratch1_, times_2, FixedArray::kHeaderSize));
// Calculate location of the first key name.
__ add(Operand(map_result_),
Immediate(FixedArray::kHeaderSize +
DescriptorArray::kFirstIndex * kPointerSize));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf, the result is false.
Label entry, loop;
__ jmp(&entry);
__ bind(&loop);
__ mov(scratch2_, FieldOperand(map_result_, 0));
__ cmp(scratch2_, Factory::value_of_symbol());
__ j(equal, &false_result);
__ add(Operand(map_result_), Immediate(kPointerSize));
__ bind(&entry);
__ cmp(map_result_, Operand(scratch1_));
__ j(not_equal, &loop);
// Reload map as register map_result_ was used as temporary above.
__ mov(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
// If a valueOf property is not found on the object, check that its
// prototype is the unmodified String prototype. If not, the result is false.
__ mov(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
__ test(scratch1_, Immediate(kSmiTagMask));
__ j(zero, &false_result);
__ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
__ mov(scratch2_, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(scratch2_,
FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
__ cmp(scratch1_,
CodeGenerator::ContextOperand(
scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
__ j(not_equal, &false_result);
// Set the bit in the map to indicate that it has been checked safe for
// default valueOf and set true result.
__ or_(FieldOperand(map_result_, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ Set(map_result_, Immediate(1));
__ jmp(exit_label());
__ bind(&false_result);
// Set false result.
__ Set(map_result_, Immediate(0));
}
private:
Register object_;
Register map_result_;
Register scratch1_;
Register scratch2_;
};
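Aside (not part of this commit's diff): the deferred code above walks the map's descriptor array looking for a valueOf key. A minimal sketch of that scan, with ordinary C++ containers standing in for the descriptor array; as noted above, transitions are not distinguished, so a false negative is possible.

#include <cstdio>
#include <string>
#include <vector>

bool SafeForDefaultValueOf(const std::vector<std::string>& descriptor_keys) {
  for (const std::string& key : descriptor_keys) {
    if (key == "valueOf") return false;  // any valueOf descriptor makes it unsafe
  }
  return true;
}

int main() {
  std::printf("%d\n", SafeForDefaultValueOf({"length", "charAt"}));   // 1
  std::printf("%d\n", SafeForDefaultValueOf({"length", "valueOf"}));  // 0
}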
void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
Result obj = frame_->Pop(); // Pop the string wrapper.
obj.ToRegister();
ASSERT(obj.is_valid());
if (FLAG_debug_code) {
__ AbortIfSmi(obj.reg());
}
// Check whether this map has already been checked to be safe for default
// valueOf.
Result map_result = allocator()->Allocate();
ASSERT(map_result.is_valid());
__ mov(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
__ test_b(FieldOperand(map_result.reg(), Map::kBitField2Offset),
1 << Map::kStringWrapperSafeForDefaultValueOf);
destination()->true_target()->Branch(not_zero);
// We need an additional two scratch registers for the deferred code.
Result temp1 = allocator()->Allocate();
ASSERT(temp1.is_valid());
Result temp2 = allocator()->Allocate();
ASSERT(temp2.is_valid());
DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
new DeferredIsStringWrapperSafeForDefaultValueOf(
obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
deferred->Branch(zero);
deferred->BindExit();
__ test(map_result.reg(), Operand(map_result.reg()));
obj.Unuse();
map_result.Unuse();
temp1.Unuse();
temp2.Unuse();
destination()->Split(not_equal);
}
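Aside (not part of this commit's diff): the generator above first tests a bit cached in the map (Map::kStringWrapperSafeForDefaultValueOf) and only falls back to the deferred descriptor scan on a miss, which then sets the bit. A sketch of that memoization pattern under illustrative names and a placeholder bit position:

#include <cstdint>

constexpr uint8_t kSafeForDefaultValueOfBit = 1 << 2;  // placeholder bit position

struct Map { uint8_t bit_field2 = 0; };

// Stands in for the deferred descriptor-array walk shown above.
bool SlowCheck(const Map&) { return true; }

bool IsSafeForDefaultValueOf(Map& map) {
  if (map.bit_field2 & kSafeForDefaultValueOfBit) return true;  // cached fast path
  if (!SlowCheck(map)) return false;            // negative results stay uncached
  map.bit_field2 |= kSafeForDefaultValueOfBit;  // memoize the positive answer
  return true;
}

int main() {
  Map m;
  return IsSafeForDefaultValueOf(m) ? 0 : 1;  // later calls hit the cached bit
}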
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) { void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of: // This generates a fast version of:
// (%_ClassOf(arg) === 'Function') // (%_ClassOf(arg) === 'Function')
@ -9467,6 +9814,11 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(eax, JSFunction::kContextOffset), esi); __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
__ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx); __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
__ mov(FieldOperand(eax, JSFunction::kCodeOffset), edx);
// Return and remove the on-stack parameter. // Return and remove the on-stack parameter.
__ ret(1 * kPointerSize); __ ret(1 * kPointerSize);
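Aside (not part of this commit's diff): a sketch of the layout change the hunk above and the earlier CallApplyLazy hunk rely on; each JSFunction now caches its own code pointer, copied from the SharedFunctionInfo when the closure is created, so checks like the FunctionApply comparison read one field instead of two. Names and structure are illustrative only.

struct Code {};
struct SharedFunctionInfo { Code* code; };
struct JSFunction { SharedFunctionInfo* shared; Code* code; };  // code: the new field

JSFunction MakeClosure(SharedFunctionInfo* shared) {
  JSFunction fn;
  fn.shared = shared;
  // Mirrors: mov edx, [edx + SharedFunctionInfo::kCodeOffset]
  //          mov [eax + JSFunction::kCodeOffset], edx
  fn.code = shared->code;
  return fn;
}

bool IsApplyCode(const JSFunction& fn, const Code* apply_code) {
  return fn.code == apply_code;  // previously: fn.shared->code == apply_code
}

int main() {
  Code apply;
  SharedFunctionInfo shared{&apply};
  JSFunction fn = MakeClosure(&shared);
  return IsApplyCode(fn, &apply) ? 0 : 1;
}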
33 deps/v8/src/ia32/codegen-ia32.h
@ -358,6 +358,10 @@ class CodeGenerator: public AstVisitor {
return FieldOperand(array, index_as_smi, times_half_pointer_size, offset); return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
} }
static Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
private: private:
// Construction/Destruction // Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm); explicit CodeGenerator(MacroAssembler* masm);
@ -430,10 +434,6 @@ class CodeGenerator: public AstVisitor {
// The following are used by class Reference. // The following are used by class Reference.
void LoadReference(Reference* ref); void LoadReference(Reference* ref);
static Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
Operand SlotOperand(Slot* slot, Register tmp); Operand SlotOperand(Slot* slot, Register tmp);
Operand ContextSlotOperandCheckExtensions(Slot* slot, Operand ContextSlotOperandCheckExtensions(Slot* slot,
@ -530,7 +530,7 @@ class CodeGenerator: public AstVisitor {
// Emits code sequence that jumps to deferred code if the inputs // Emits code sequence that jumps to deferred code if the inputs
// are not both smis. Cannot be in MacroAssembler because it takes // are not both smis. Cannot be in MacroAssembler because it takes
// advantage of TypeInfo to skip unneeded checks. // a deferred code object.
void JumpIfNotBothSmiUsingTypeInfo(Register left, void JumpIfNotBothSmiUsingTypeInfo(Register left,
Register right, Register right,
Register scratch, Register scratch,
@ -538,6 +538,15 @@ class CodeGenerator: public AstVisitor {
TypeInfo right_info, TypeInfo right_info,
DeferredCode* deferred); DeferredCode* deferred);
// Emits code sequence that jumps to the label if the inputs
// are not both smis.
void JumpIfNotBothSmiUsingTypeInfo(Register left,
Register right,
Register scratch,
TypeInfo left_info,
TypeInfo right_info,
Label* on_non_smi);
// If possible, combine two constant smi values using op to produce // If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time. // a smi result, and push it on the virtual frame, all at compile time.
// Returns true if it succeeds. Otherwise it has no effect. // Returns true if it succeeds. Otherwise it has no effect.
@ -644,6 +653,8 @@ class CodeGenerator: public AstVisitor {
void GenerateIsSpecObject(ZoneList<Expression*>* args); void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args); void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args); void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
void GenerateIsStringWrapperSafeForDefaultValueOf(
ZoneList<Expression*>* args);
// Support for construct call checks. // Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args); void GenerateIsConstructCall(ZoneList<Expression*>* args);
@ -802,6 +813,18 @@ class TranscendentalCacheStub: public CodeStub {
}; };
class ToBooleanStub: public CodeStub {
public:
ToBooleanStub() { }
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return ToBoolean; }
int MinorKey() { return 0; }
};
// Flag that indicates how to generate code for the stub GenericBinaryOpStub. // Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags { enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0, NO_GENERIC_BINARY_FLAGS = 0,
44 deps/v8/src/ia32/debug-ia32.cc
@ -254,32 +254,20 @@ void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
} }
// FrameDropper is a code replacement for a JavaScript frame with possibly
// several frames above.
// There is no calling conventions here, because it never actually gets called,
// it only gets returned to.
// Frame structure (conforms InternalFrame structure):
// -- JSFunction
// -- code
// -- SMI maker
// -- context
// -- frame base
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
ExternalReference restarter_frame_function_slot = ExternalReference restarter_frame_function_slot =
ExternalReference(Debug_Address::RestarterFrameFunctionPointer()); ExternalReference(Debug_Address::RestarterFrameFunctionPointer());
__ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0)); __ mov(Operand::StaticVariable(restarter_frame_function_slot), Immediate(0));
// We do not know our frame height, but set esp based on ebp. // We do not know our frame height, but set esp based on ebp.
__ lea(esp, Operand(ebp, -4 * kPointerSize)); __ lea(esp, Operand(ebp, -1 * kPointerSize));
__ pop(edi); // function __ pop(edi); // Function.
// Skip code self-reference and marker.
__ add(Operand(esp), Immediate(2 * kPointerSize));
__ pop(esi); // Context.
__ pop(ebp); __ pop(ebp);
// Load context from the function.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Get function code. // Get function code.
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
@ -289,27 +277,9 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ jmp(Operand(edx)); __ jmp(Operand(edx));
} }
#undef __ const bool Debug::kFrameDropperSupported = true;
// TODO(LiveEdit): consider making it platform-independent.
// TODO(LiveEdit): use more named constants instead of numbers.
Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
Handle<Code> code) {
ASSERT(bottom_js_frame->is_java_script());
Address fp = bottom_js_frame->fp();
Memory::Object_at(fp - 4 * kPointerSize) =
Memory::Object_at(fp - 2 * kPointerSize); // Move edi (function).
Memory::Object_at(fp - 3 * kPointerSize) = *code;
Memory::Object_at(fp - 2 * kPointerSize) = Smi::FromInt(StackFrame::INTERNAL);
return reinterpret_cast<Object**>(&Memory::Object_at(fp - 4 * kPointerSize));
}
const int Debug::kFrameDropperFrameSize = 5;
#undef __
#endif // ENABLE_DEBUGGER_SUPPORT #endif // ENABLE_DEBUGGER_SUPPORT
1 deps/v8/src/ia32/disasm-ia32.cc
@ -560,6 +560,7 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) {
case kROL: mnem = "rol"; break; case kROL: mnem = "rol"; break;
case kROR: mnem = "ror"; break; case kROR: mnem = "ror"; break;
case kRCL: mnem = "rcl"; break; case kRCL: mnem = "rcl"; break;
case kRCR: mnem = "rcr"; break;
case kSHL: mnem = "shl"; break; case kSHL: mnem = "shl"; break;
case KSHR: mnem = "shr"; break; case KSHR: mnem = "shr"; break;
case kSAR: mnem = "sar"; break; case kSAR: mnem = "sar"; break;
954 deps/v8/src/ia32/fast-codegen-ia32.cc
@ -1,954 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "codegen-inl.h"
#include "fast-codegen.h"
#include "data-flow.h"
#include "scopes.h"
namespace v8 {
namespace internal {
#define BAILOUT(reason) \
do { \
if (FLAG_trace_bailout) { \
PrintF("%s\n", reason); \
} \
has_supported_syntax_ = false; \
return; \
} while (false)
#define CHECK_BAILOUT \
do { \
if (!has_supported_syntax_) return; \
} while (false)
void FastCodeGenSyntaxChecker::Check(CompilationInfo* info) {
info_ = info;
// We do not specialize if we do not have a receiver or if it is not a
// JS object with fast mode properties.
if (!info->has_receiver()) BAILOUT("No receiver");
if (!info->receiver()->IsJSObject()) BAILOUT("Receiver is not an object");
Handle<JSObject> object = Handle<JSObject>::cast(info->receiver());
if (!object->HasFastProperties()) BAILOUT("Receiver is in dictionary mode");
// We do not support stack or heap slots (both of which require
// allocation).
Scope* scope = info->scope();
if (scope->num_stack_slots() > 0) {
BAILOUT("Function has stack-allocated locals");
}
if (scope->num_heap_slots() > 0) {
BAILOUT("Function has context-allocated locals");
}
VisitDeclarations(scope->declarations());
CHECK_BAILOUT;
// We do not support empty function bodies.
if (info->function()->body()->is_empty()) {
BAILOUT("Function has an empty body");
}
VisitStatements(info->function()->body());
}
void FastCodeGenSyntaxChecker::VisitDeclarations(
ZoneList<Declaration*>* decls) {
if (!decls->is_empty()) BAILOUT("Function has declarations");
}
void FastCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
if (stmts->length() != 1) {
BAILOUT("Function body is not a singleton statement.");
}
Visit(stmts->at(0));
}
void FastCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
}
void FastCodeGenSyntaxChecker::VisitBlock(Block* stmt) {
VisitStatements(stmt->statements());
}
void FastCodeGenSyntaxChecker::VisitExpressionStatement(
ExpressionStatement* stmt) {
Visit(stmt->expression());
}
void FastCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) {
// Supported.
}
void FastCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) {
BAILOUT("IfStatement");
}
void FastCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) {
BAILOUT("Continuestatement");
}
void FastCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) {
BAILOUT("BreakStatement");
}
void FastCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) {
BAILOUT("ReturnStatement");
}
void FastCodeGenSyntaxChecker::VisitWithEnterStatement(
WithEnterStatement* stmt) {
BAILOUT("WithEnterStatement");
}
void FastCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) {
BAILOUT("WithExitStatement");
}
void FastCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) {
BAILOUT("SwitchStatement");
}
void FastCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
BAILOUT("DoWhileStatement");
}
void FastCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) {
BAILOUT("WhileStatement");
}
void FastCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) {
BAILOUT("ForStatement");
}
void FastCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) {
BAILOUT("ForInStatement");
}
void FastCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) {
BAILOUT("TryCatchStatement");
}
void FastCodeGenSyntaxChecker::VisitTryFinallyStatement(
TryFinallyStatement* stmt) {
BAILOUT("TryFinallyStatement");
}
void FastCodeGenSyntaxChecker::VisitDebuggerStatement(
DebuggerStatement* stmt) {
BAILOUT("DebuggerStatement");
}
void FastCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
BAILOUT("FunctionLiteral");
}
void FastCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
BAILOUT("SharedFunctionInfoLiteral");
}
void FastCodeGenSyntaxChecker::VisitConditional(Conditional* expr) {
BAILOUT("Conditional");
}
void FastCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
UNREACHABLE();
}
void FastCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
// Only global variable references are supported.
Variable* var = expr->var();
if (!var->is_global() || var->is_this()) BAILOUT("Non-global variable");
// Check if the global variable is existing and non-deletable.
if (info()->has_global_object()) {
LookupResult lookup;
info()->global_object()->Lookup(*expr->name(), &lookup);
if (!lookup.IsProperty()) {
BAILOUT("Non-existing global variable");
}
// We do not handle global variables with accessors or interceptors.
if (lookup.type() != NORMAL) {
BAILOUT("Global variable with accessors or interceptors.");
}
// We do not handle deletable global variables.
if (!lookup.IsDontDelete()) {
BAILOUT("Deletable global variable");
}
}
}
void FastCodeGenSyntaxChecker::VisitLiteral(Literal* expr) {
BAILOUT("Literal");
}
void FastCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
BAILOUT("RegExpLiteral");
}
void FastCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) {
BAILOUT("ObjectLiteral");
}
void FastCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) {
BAILOUT("ArrayLiteral");
}
void FastCodeGenSyntaxChecker::VisitCatchExtensionObject(
CatchExtensionObject* expr) {
BAILOUT("CatchExtensionObject");
}
void FastCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
// Simple assignments to (named) this properties are supported.
if (expr->op() != Token::ASSIGN) BAILOUT("Non-simple assignment");
Property* prop = expr->target()->AsProperty();
if (prop == NULL) BAILOUT("Non-property assignment");
VariableProxy* proxy = prop->obj()->AsVariableProxy();
if (proxy == NULL || !proxy->var()->is_this()) {
BAILOUT("Non-this-property assignment");
}
if (!prop->key()->IsPropertyName()) {
BAILOUT("Non-named-property assignment");
}
// We will only specialize for fields on the object itself.
// Expression::IsPropertyName implies that the name is a literal
// symbol but we do not assume that.
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsString()) {
Handle<Object> receiver = info()->receiver();
Handle<String> name = Handle<String>::cast(key->handle());
LookupResult lookup;
receiver->Lookup(*name, &lookup);
if (!lookup.IsProperty()) {
BAILOUT("Assigned property not found at compile time");
}
if (lookup.holder() != *receiver) BAILOUT("Non-own property assignment");
if (!lookup.type() == FIELD) BAILOUT("Non-field property assignment");
} else {
UNREACHABLE();
BAILOUT("Unexpected non-string-literal property key");
}
Visit(expr->value());
}
void FastCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
BAILOUT("Throw");
}
void FastCodeGenSyntaxChecker::VisitProperty(Property* expr) {
// We support named this property references.
VariableProxy* proxy = expr->obj()->AsVariableProxy();
if (proxy == NULL || !proxy->var()->is_this()) {
BAILOUT("Non-this-property reference");
}
if (!expr->key()->IsPropertyName()) {
BAILOUT("Non-named-property reference");
}
// We will only specialize for fields on the object itself.
// Expression::IsPropertyName implies that the name is a literal
// symbol but we do not assume that.
Literal* key = expr->key()->AsLiteral();
if (key != NULL && key->handle()->IsString()) {
Handle<Object> receiver = info()->receiver();
Handle<String> name = Handle<String>::cast(key->handle());
LookupResult lookup;
receiver->Lookup(*name, &lookup);
if (!lookup.IsProperty()) {
BAILOUT("Referenced property not found at compile time");
}
if (lookup.holder() != *receiver) BAILOUT("Non-own property reference");
if (!lookup.type() == FIELD) BAILOUT("Non-field property reference");
} else {
UNREACHABLE();
BAILOUT("Unexpected non-string-literal property key");
}
}
void FastCodeGenSyntaxChecker::VisitCall(Call* expr) {
BAILOUT("Call");
}
void FastCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) {
BAILOUT("CallNew");
}
void FastCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) {
BAILOUT("CallRuntime");
}
void FastCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) {
BAILOUT("UnaryOperation");
}
void FastCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
BAILOUT("CountOperation");
}
void FastCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
// We support bitwise OR.
switch (expr->op()) {
case Token::COMMA:
BAILOUT("BinaryOperation COMMA");
case Token::OR:
BAILOUT("BinaryOperation OR");
case Token::AND:
BAILOUT("BinaryOperation AND");
case Token::BIT_OR:
// We support expressions nested on the left because they only require
// a pair of registers to keep all intermediate values in registers
// (i.e., the expression stack has height no more than two).
if (!expr->right()->IsLeaf()) BAILOUT("expression nested on right");
// We do not allow subexpressions with side effects because we
// (currently) bail out to the beginning of the full function. The
// only expressions with side effects that we would otherwise handle
// are assignments.
if (expr->left()->AsAssignment() != NULL ||
expr->right()->AsAssignment() != NULL) {
BAILOUT("subexpression of binary operation has side effects");
}
Visit(expr->left());
CHECK_BAILOUT;
Visit(expr->right());
break;
case Token::BIT_XOR:
BAILOUT("BinaryOperation BIT_XOR");
case Token::BIT_AND:
BAILOUT("BinaryOperation BIT_AND");
case Token::SHL:
BAILOUT("BinaryOperation SHL");
case Token::SAR:
BAILOUT("BinaryOperation SAR");
case Token::SHR:
BAILOUT("BinaryOperation SHR");
case Token::ADD:
BAILOUT("BinaryOperation ADD");
case Token::SUB:
BAILOUT("BinaryOperation SUB");
case Token::MUL:
BAILOUT("BinaryOperation MUL");
case Token::DIV:
BAILOUT("BinaryOperation DIV");
case Token::MOD:
BAILOUT("BinaryOperation MOD");
default:
UNREACHABLE();
}
}
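Aside (not part of this commit's diff; this file is being deleted by it): a sketch of the register argument made in the comments above; a chain that only nests on the left, such as ((a | b) | c) | d, can be evaluated with two accumulator slots because every right operand is a leaf and the expression stack never exceeds height two.

#include <cstdint>
#include <cstdio>
#include <vector>

// Assumes at least one leaf.
uint32_t EvalLeftNestedOr(const std::vector<uint32_t>& leaves) {
  uint32_t acc1 = leaves[0];        // accumulator1: the running left value
  for (size_t i = 1; i < leaves.size(); ++i) {
    uint32_t acc0 = leaves[i];      // accumulator0: the leaf right operand
    acc1 |= acc0;                   // only two live values at any point
  }
  return acc1;
}

int main() {
  std::printf("%u\n", EvalLeftNestedOr({1, 2, 4, 8}));  // 15
}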
void FastCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) {
BAILOUT("CompareOperation");
}
void FastCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
BAILOUT("ThisFunction");
}
#undef BAILOUT
#undef CHECK_BAILOUT
#define __ ACCESS_MASM(masm())
Handle<Code> FastCodeGenerator::MakeCode(CompilationInfo* info) {
// Label the AST before calling MakeCodePrologue, so AST node numbers are
// printed with the AST.
AstLabeler labeler;
labeler.Label(info);
CodeGenerator::MakeCodePrologue(info);
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize);
// Generate the fast-path code.
FastCodeGenerator fast_cgen(&masm);
fast_cgen.Generate(info);
if (fast_cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
return Handle<Code>::null();
}
// Generate the full code for the function in bailout mode, using the same
// macro assembler.
CodeGenerator cgen(&masm);
CodeGeneratorScope scope(&cgen);
info->set_mode(CompilationInfo::SECONDARY);
cgen.Generate(info);
if (cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
return Handle<Code>::null();
}
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
return CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
}
Register FastCodeGenerator::accumulator0() { return eax; }
Register FastCodeGenerator::accumulator1() { return edx; }
Register FastCodeGenerator::scratch0() { return ecx; }
Register FastCodeGenerator::scratch1() { return edi; }
Register FastCodeGenerator::receiver_reg() { return ebx; }
Register FastCodeGenerator::context_reg() { return esi; }
void FastCodeGenerator::EmitLoadReceiver() {
// Offset 2 is due to return address and saved frame pointer.
int index = 2 + function()->scope()->num_parameters();
__ mov(receiver_reg(), Operand(ebp, index * kPointerSize));
}
void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> cell) {
ASSERT(!destination().is(no_reg));
ASSERT(cell->IsJSGlobalPropertyCell());
__ mov(destination(), Immediate(cell));
__ mov(destination(),
FieldOperand(destination(), JSGlobalPropertyCell::kValueOffset));
if (FLAG_debug_code) {
__ cmp(destination(), Factory::the_hole_value());
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
// The loaded value is not known to be a smi.
clear_as_smi(destination());
}
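Aside (not part of this commit's diff; this file is being deleted by it): a sketch of the global-cell load above. The compiled code embeds a pointer to the global's property cell, so reading the variable is a single indirection with no name lookup at run time; the struct and field are illustrative stand-ins.

struct JSGlobalPropertyCell { void* value; };

void* LoadGlobalFromCell(const JSGlobalPropertyCell* cell) {
  // mov destination, Immediate(cell)
  // mov destination, [destination + JSGlobalPropertyCell::kValueOffset]
  return cell->value;
}

int main() {
  int forty_two = 42;
  JSGlobalPropertyCell cell{&forty_two};
  return LoadGlobalFromCell(&cell) == &forty_two ? 0 : 1;
}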
void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
LookupResult lookup;
info()->receiver()->Lookup(*name, &lookup);
ASSERT(lookup.holder() == *info()->receiver());
ASSERT(lookup.type() == FIELD);
Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
int index = lookup.GetFieldIndex() - map->inobject_properties();
int offset = index * kPointerSize;
// We will emit the write barrier unless the stored value is statically
// known to be a smi.
bool needs_write_barrier = !is_smi(accumulator0());
// Perform the store. Negative offsets are inobject properties.
if (offset < 0) {
offset += map->instance_size();
__ mov(FieldOperand(receiver_reg(), offset), accumulator0());
if (needs_write_barrier) {
// Preserve receiver from write barrier.
__ mov(scratch0(), receiver_reg());
}
} else {
offset += FixedArray::kHeaderSize;
__ mov(scratch0(),
FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
__ mov(FieldOperand(scratch0(), offset), accumulator0());
}
if (needs_write_barrier) {
if (destination().is(no_reg)) {
// After RecordWrite accumulator0 is only accidentally a smi, but it is
// already marked as not known to be one.
__ RecordWrite(scratch0(), offset, accumulator0(), scratch1());
} else {
// Copy the value to the other accumulator to preserve a copy from the
// write barrier. One of the accumulators is available as a scratch
// register. Neither is a smi.
__ mov(accumulator1(), accumulator0());
clear_as_smi(accumulator1());
Register value_scratch = other_accumulator(destination());
__ RecordWrite(scratch0(), offset, value_scratch, scratch1());
}
} else if (destination().is(accumulator1())) {
__ mov(accumulator1(), accumulator0());
// Is a smi because we do not need the write barrier.
set_as_smi(accumulator1());
}
}
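Aside (not part of this commit's diff; this file is being deleted by it): a sketch of the field-index convention the store above (and the load below) use; indices below the in-object count live inside the object itself, at negative offsets from its end, while the rest live in the separate properties array after its header. Word-sized offsets and names are illustrative.

#include <cstdio>

struct FieldLocation { bool in_object; int offset_words; };

FieldLocation LocateField(int field_index, int inobject_properties,
                          int instance_size_words) {
  int index = field_index - inobject_properties;
  if (index < 0) {
    // Negative: the slot sits inside the object, counted back from its end.
    return { true, instance_size_words + index };
  }
  // Otherwise it lives in the properties backing store, after its header.
  return { false, /* FixedArray header words */ 2 + index };
}

int main() {
  FieldLocation a = LocateField(1, 4, 10);  // in-object slot
  FieldLocation b = LocateField(6, 4, 10);  // backing-store slot
  std::printf("%d %d\n", a.in_object, a.offset_words);  // 1 7
  std::printf("%d %d\n", b.in_object, b.offset_words);  // 0 4
}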
void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
ASSERT(!destination().is(no_reg));
LookupResult lookup;
info()->receiver()->Lookup(*name, &lookup);
ASSERT(lookup.holder() == *info()->receiver());
ASSERT(lookup.type() == FIELD);
Handle<Map> map(Handle<HeapObject>::cast(info()->receiver())->map());
int index = lookup.GetFieldIndex() - map->inobject_properties();
int offset = index * kPointerSize;
// Perform the load. Negative offsets are inobject properties.
if (offset < 0) {
offset += map->instance_size();
__ mov(destination(), FieldOperand(receiver_reg(), offset));
} else {
offset += FixedArray::kHeaderSize;
__ mov(scratch0(),
FieldOperand(receiver_reg(), JSObject::kPropertiesOffset));
__ mov(destination(), FieldOperand(scratch0(), offset));
}
// The loaded value is not known to be a smi.
clear_as_smi(destination());
}
void FastCodeGenerator::EmitBitOr() {
if (is_smi(accumulator0()) && is_smi(accumulator1())) {
// If both operands are known to be a smi then there is no need to check
// the operands or result. There is no need to perform the operation in
// an effect context.
if (!destination().is(no_reg)) {
// Leave the result in the destination register. Bitwise or is
// commutative.
__ or_(destination(), Operand(other_accumulator(destination())));
}
} else {
// Left is in accumulator1, right in accumulator0.
Label* bailout = NULL;
if (destination().is(accumulator0())) {
__ mov(scratch0(), accumulator0());
__ or_(destination(), Operand(accumulator1())); // Or is commutative.
__ test(destination(), Immediate(kSmiTagMask));
bailout = info()->AddBailout(accumulator1(), scratch0()); // Left, right.
} else if (destination().is(accumulator1())) {
__ mov(scratch0(), accumulator1());
__ or_(destination(), Operand(accumulator0()));
__ test(destination(), Immediate(kSmiTagMask));
bailout = info()->AddBailout(scratch0(), accumulator0());
} else {
ASSERT(destination().is(no_reg));
__ mov(scratch0(), accumulator1());
__ or_(scratch0(), Operand(accumulator0()));
__ test(scratch0(), Immediate(kSmiTagMask));
bailout = info()->AddBailout(accumulator1(), accumulator0());
}
__ j(not_zero, bailout, not_taken);
}
// If we didn't bailout, the result (in fact, both inputs too) is known to
// be a smi.
set_as_smi(accumulator0());
set_as_smi(accumulator1());
}
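Aside (not part of this commit's diff; this file is being deleted by it): a sketch of why EmitBitOr above needs no result check when both inputs are known smis, and only one tag test on the result otherwise; OR-ing two tagged words also ORs their tag bits.

#include <cstdint>
#include <cstdio>

constexpr uint32_t kSmiTagMask = 1;  // low bit 0 == smi

bool IsSmi(uint32_t v) { return (v & kSmiTagMask) == 0; }

int main() {
  uint32_t a = 6u << 1, b = 9u << 1;     // two tagged smis
  uint32_t r = a | b;                    // tagged result of the smi OR
  std::printf("%d\n", IsSmi(r));         // 1: smi | smi is always a smi
  uint32_t heap = 0x1000u | 1u;          // heap-object tagged pointer
  std::printf("%d\n", IsSmi(a | heap));  // 0: one tag test catches non-smi input
}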
void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
ASSERT(info_ == NULL);
info_ = compilation_info;
Comment cmnt(masm_, "[ function compiled by fast code generator");
// Save the caller's frame pointer and set up our own.
Comment prologue_cmnt(masm(), ";; Prologue");
__ push(ebp);
__ mov(ebp, esp);
__ push(esi); // Context.
__ push(edi); // Closure.
// Note that we keep a live register reference to esi (context) at this
// point.
Label* bailout_to_beginning = info()->AddBailout();
// Receiver (this) is allocated to a fixed register.
if (info()->has_this_properties()) {
Comment cmnt(masm(), ";; MapCheck(this)");
if (FLAG_print_ir) {
PrintF("#: MapCheck(this)\n");
}
ASSERT(info()->has_receiver() && info()->receiver()->IsHeapObject());
Handle<HeapObject> object = Handle<HeapObject>::cast(info()->receiver());
Handle<Map> map(object->map());
EmitLoadReceiver();
__ CheckMap(receiver_reg(), map, bailout_to_beginning, false);
}
// If there is a global variable access check if the global object is the
// same as at lazy-compilation time.
if (info()->has_globals()) {
Comment cmnt(masm(), ";; MapCheck(GLOBAL)");
if (FLAG_print_ir) {
PrintF("#: MapCheck(GLOBAL)\n");
}
ASSERT(info()->has_global_object());
Handle<Map> map(info()->global_object()->map());
__ mov(scratch0(), CodeGenerator::GlobalObject());
__ CheckMap(scratch0(), map, bailout_to_beginning, true);
}
VisitStatements(function()->body());
Comment return_cmnt(masm(), ";; Return(<undefined>)");
if (FLAG_print_ir) {
PrintF("#: Return(<undefined>)\n");
}
__ mov(eax, Factory::undefined_value());
__ mov(esp, ebp);
__ pop(ebp);
__ ret((scope()->num_parameters() + 1) * kPointerSize);
}
void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
}
void FastCodeGenerator::VisitBlock(Block* stmt) {
VisitStatements(stmt->statements());
}
void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
Visit(stmt->expression());
}
void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
// Nothing to do.
}
void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
UNREACHABLE();
}
void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitConditional(Conditional* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitSlot(Slot* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
ASSERT(expr->var()->is_global() && !expr->var()->is_this());
// Check if we can compile a global variable load directly from the cell.
ASSERT(info()->has_global_object());
LookupResult lookup;
info()->global_object()->Lookup(*expr->name(), &lookup);
// We only support normal (non-accessor/interceptor) DontDelete properties
// for now.
ASSERT(lookup.IsProperty());
ASSERT_EQ(NORMAL, lookup.type());
ASSERT(lookup.IsDontDelete());
Handle<Object> cell(info()->global_object()->GetPropertyCell(&lookup));
// Global variable lookups do not have side effects, so we do not need to
// emit code if we are in an effect context.
if (!destination().is(no_reg)) {
Comment cmnt(masm(), ";; Global");
if (FLAG_print_ir) {
SmartPointer<char> name = expr->name()->ToCString();
PrintF("%d: t%d = Global(%s)\n", expr->num(),
expr->num(), *name);
}
EmitGlobalVariableLoad(cell);
}
}
void FastCodeGenerator::VisitLiteral(Literal* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitAssignment(Assignment* expr) {
// Known to be a simple this property assignment. Effectively a unary
// operation.
{ Register my_destination = destination();
set_destination(accumulator0());
Visit(expr->value());
set_destination(my_destination);
}
Property* prop = expr->target()->AsProperty();
ASSERT_NOT_NULL(prop);
ASSERT_NOT_NULL(prop->obj()->AsVariableProxy());
ASSERT(prop->obj()->AsVariableProxy()->var()->is_this());
ASSERT(prop->key()->IsPropertyName());
Handle<String> name =
Handle<String>::cast(prop->key()->AsLiteral()->handle());
Comment cmnt(masm(), ";; Store to this");
if (FLAG_print_ir) {
SmartPointer<char> name_string = name->ToCString();
PrintF("%d: ", expr->num());
if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
PrintF("Store(this, \"%s\", t%d)\n", *name_string,
expr->value()->num());
}
EmitThisPropertyStore(name);
}
void FastCodeGenerator::VisitThrow(Throw* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitProperty(Property* expr) {
ASSERT_NOT_NULL(expr->obj()->AsVariableProxy());
ASSERT(expr->obj()->AsVariableProxy()->var()->is_this());
ASSERT(expr->key()->IsPropertyName());
if (!destination().is(no_reg)) {
Handle<String> name =
Handle<String>::cast(expr->key()->AsLiteral()->handle());
Comment cmnt(masm(), ";; Load from this");
if (FLAG_print_ir) {
SmartPointer<char> name_string = name->ToCString();
PrintF("%d: t%d = Load(this, \"%s\")\n",
expr->num(), expr->num(), *name_string);
}
EmitThisPropertyLoad(name);
}
}
void FastCodeGenerator::VisitCall(Call* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitCallNew(CallNew* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
// We support limited binary operations: bitwise OR only allowed to be
// nested on the left.
ASSERT(expr->op() == Token::BIT_OR);
ASSERT(expr->right()->IsLeaf());
{ Register my_destination = destination();
set_destination(accumulator1());
Visit(expr->left());
set_destination(accumulator0());
Visit(expr->right());
set_destination(my_destination);
}
Comment cmnt(masm(), ";; BIT_OR");
if (FLAG_print_ir) {
PrintF("%d: ", expr->num());
if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
PrintF("BIT_OR(t%d, t%d)\n", expr->left()->num(), expr->right()->num());
}
EmitBitOr();
}
void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
UNREACHABLE();
}
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
155 deps/v8/src/ia32/fast-codegen-ia32.h
@ -1,155 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_FAST_CODEGEN_IA32_H_
#define V8_FAST_CODEGEN_IA32_H_
#include "v8.h"
#include "ast.h"
#include "compiler.h"
#include "list.h"
namespace v8 {
namespace internal {
class FastCodeGenSyntaxChecker: public AstVisitor {
public:
explicit FastCodeGenSyntaxChecker()
: info_(NULL), has_supported_syntax_(true) {
}
void Check(CompilationInfo* info);
CompilationInfo* info() { return info_; }
bool has_supported_syntax() { return has_supported_syntax_; }
private:
void VisitDeclarations(ZoneList<Declaration*>* decls);
void VisitStatements(ZoneList<Statement*>* stmts);
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
CompilationInfo* info_;
bool has_supported_syntax_;
DISALLOW_COPY_AND_ASSIGN(FastCodeGenSyntaxChecker);
};
class FastCodeGenerator: public AstVisitor {
public:
explicit FastCodeGenerator(MacroAssembler* masm)
: masm_(masm), info_(NULL), destination_(no_reg), smi_bits_(0) {
}
static Handle<Code> MakeCode(CompilationInfo* info);
void Generate(CompilationInfo* compilation_info);
private:
MacroAssembler* masm() { return masm_; }
CompilationInfo* info() { return info_; }
Register destination() { return destination_; }
void set_destination(Register reg) { destination_ = reg; }
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return info_->scope(); }
// Platform-specific fixed registers, all guaranteed distinct.
Register accumulator0();
Register accumulator1();
Register scratch0();
Register scratch1();
Register receiver_reg();
Register context_reg();
Register other_accumulator(Register reg) {
ASSERT(reg.is(accumulator0()) || reg.is(accumulator1()));
return (reg.is(accumulator0())) ? accumulator1() : accumulator0();
}
// Flags are true if the respective register is statically known to hold a
// smi. We do not track every register, only the accumulator registers.
bool is_smi(Register reg) {
ASSERT(!reg.is(no_reg));
return (smi_bits_ & reg.bit()) != 0;
}
void set_as_smi(Register reg) {
ASSERT(!reg.is(no_reg));
smi_bits_ = smi_bits_ | reg.bit();
}
void clear_as_smi(Register reg) {
ASSERT(!reg.is(no_reg));
smi_bits_ = smi_bits_ & ~reg.bit();
}
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
// Emit code to load the receiver from the stack into receiver_reg.
void EmitLoadReceiver();
// Emit code to load a global variable directly from a global property
// cell into the destination register.
void EmitGlobalVariableLoad(Handle<Object> cell);
// Emit a store to an own property of this. The stored value is expected
// in accumulator0 and the receiver in receiver_reg. The receiver
// register is preserved and the result (the stored value) is left in the
// destination register.
void EmitThisPropertyStore(Handle<String> name);
// Emit a load from an own property of this. The receiver is expected in
// receiver_reg. The receiver register is preserved and the result is
// left in the destination register.
void EmitThisPropertyLoad(Handle<String> name);
// Emit a bitwise or operation. The left operand is in accumulator1 and
// the right is in accumulator0. The result should be left in the
// destination register.
void EmitBitOr();
MacroAssembler* masm_;
CompilationInfo* info_;
Register destination_;
uint32_t smi_bits_;
DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
};
} } // namespace v8::internal
#endif // V8_FAST_CODEGEN_IA32_H_
175 deps/v8/src/ia32/full-codegen-ia32.cc
@ -54,97 +54,95 @@ namespace internal {
// //
// The function builds a JS frame. Please see JavaScriptFrameConstants in // The function builds a JS frame. Please see JavaScriptFrameConstants in
// frames-ia32.h for its layout. // frames-ia32.h for its layout.
void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) { void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL); ASSERT(info_ == NULL);
info_ = info; info_ = info;
SetFunctionPosition(function()); SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator"); Comment cmnt(masm_, "[ function compiled by full code generator");
if (mode == PRIMARY) { __ push(ebp); // Caller's frame pointer.
__ push(ebp); // Caller's frame pointer. __ mov(ebp, esp);
__ mov(ebp, esp); __ push(esi); // Callee's context.
__ push(esi); // Callee's context. __ push(edi); // Callee's JS Function.
__ push(edi); // Callee's JS Function.
{ Comment cmnt(masm_, "[ Allocate locals");
{ Comment cmnt(masm_, "[ Allocate locals"); int locals_count = scope()->num_stack_slots();
int locals_count = scope()->num_stack_slots(); if (locals_count == 1) {
if (locals_count == 1) { __ push(Immediate(Factory::undefined_value()));
__ push(Immediate(Factory::undefined_value())); } else if (locals_count > 1) {
} else if (locals_count > 1) { __ mov(eax, Immediate(Factory::undefined_value()));
__ mov(eax, Immediate(Factory::undefined_value())); for (int i = 0; i < locals_count; i++) {
for (int i = 0; i < locals_count; i++) { __ push(eax);
__ push(eax);
}
} }
} }
}
bool function_in_register = true; bool function_in_register = true;
// Possibly allocate a local context. // Possibly allocate a local context.
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) { if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context"); Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in edi. // Argument to NewContext is the function, which is still in edi.
__ push(edi); __ push(edi);
if (heap_slots <= FastNewContextStub::kMaximumSlots) { if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots); FastNewContextStub stub(heap_slots);
__ CallStub(&stub); __ CallStub(&stub);
} else { } else {
__ CallRuntime(Runtime::kNewContext, 1); __ CallRuntime(Runtime::kNewContext, 1);
} }
function_in_register = false; function_in_register = false;
// Context is returned in both eax and esi. It replaces the context // Context is returned in both eax and esi. It replaces the context
// passed to us. It's saved in the stack and kept live in esi. // passed to us. It's saved in the stack and kept live in esi.
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi); __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
// Copy parameters into context if necessary. // Copy parameters into context if necessary.
int num_parameters = scope()->num_parameters(); int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) { for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->slot(); Slot* slot = scope()->parameter(i)->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) { if (slot != NULL && slot->type() == Slot::CONTEXT) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset + int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize; (num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack. // Load parameter from stack.
__ mov(eax, Operand(ebp, parameter_offset)); __ mov(eax, Operand(ebp, parameter_offset));
// Store it in the context. // Store it in the context.
int context_offset = Context::SlotOffset(slot->index()); int context_offset = Context::SlotOffset(slot->index());
__ mov(Operand(esi, context_offset), eax); __ mov(Operand(esi, context_offset), eax);
// Update the write barrier. This clobbers all involved // Update the write barrier. This clobbers all involved
// registers, so we have to use a third register to avoid // registers, so we have to use a third register to avoid
// clobbering esi. // clobbering esi.
__ mov(ecx, esi); __ mov(ecx, esi);
__ RecordWrite(ecx, context_offset, eax, ebx); __ RecordWrite(ecx, context_offset, eax, ebx);
}
} }
} }
}
Variable* arguments = scope()->arguments()->AsVariable(); Variable* arguments = scope()->arguments()->AsVariable();
if (arguments != NULL) { if (arguments != NULL) {
// Function uses arguments object. // Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object"); Comment cmnt(masm_, "[ Allocate arguments object");
if (function_in_register) { if (function_in_register) {
__ push(edi); __ push(edi);
} else { } else {
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
// Receiver is just before the parameters on the caller's stack.
int offset = scope()->num_parameters() * kPointerSize;
__ lea(edx,
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
__ push(Immediate(Smi::FromInt(scope()->num_parameters())));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
__ CallStub(&stub);
__ mov(ecx, eax); // Duplicate result.
Move(arguments->slot(), eax, ebx, edx);
Slot* dot_arguments_slot =
scope()->arguments_shadow()->AsVariable()->slot();
Move(dot_arguments_slot, ecx, ebx, edx);
} }
// Receiver is just before the parameters on the caller's stack.
int offset = scope()->num_parameters() * kPointerSize;
__ lea(edx,
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
__ push(Immediate(Smi::FromInt(scope()->num_parameters())));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
__ CallStub(&stub);
__ mov(ecx, eax); // Duplicate result.
Move(arguments->slot(), eax, ebx, edx);
Slot* dot_arguments_slot =
scope()->arguments_shadow()->AsVariable()->slot();
Move(dot_arguments_slot, ecx, ebx, edx);
} }
{ Comment cmnt(masm_, "[ Declarations"); { Comment cmnt(masm_, "[ Declarations");
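The prologue emitted in the hunk above builds the standard ia32 JS frame: push the caller's ebp, then the context (esi) and the closure (edi), then one undefined-filled slot per local, while parameters stay on the caller's side of the return address. A rough sketch of the resulting ebp-relative offsets, written out here for illustration only; the authoritative values are the StandardFrameConstants/JavaScriptFrameConstants used in the code, which this sketch merely assumes.

#include <cstdio>

// Illustrative ia32 offsets assumed from the prologue above, not copied
// from frames-ia32.h.
const int kPointerSize      = 4;
const int kCallerSPOffset   = 2 * kPointerSize;   // above saved ebp + return address
const int kContextOffset    = -1 * kPointerSize;  // pushed esi
const int kFunctionOffset   = -2 * kPointerSize;  // pushed edi
const int kFirstLocalOffset = -3 * kPointerSize;  // locals grow downwards

// ebp-relative offset of parameter i (0-based), mirroring parameter_offset
// in Generate(): parameters sit above the return address, the last one
// closest to it.
int ParameterOffset(int i, int num_parameters) {
  return kCallerSPOffset + (num_parameters - 1 - i) * kPointerSize;
}

int main() {
  for (int i = 0; i < 3; i++)
    std::printf("param %d at ebp+%d\n", i, ParameterOffset(i, 3));
  std::printf("context at ebp%d, function at ebp%d, first local at ebp%d\n",
              kContextOffset, kFunctionOffset, kFirstLocalOffset);
  return 0;
}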
@ -1048,7 +1046,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(ecx); // Enumerable. __ push(ecx); // Enumerable.
__ push(ebx); // Current entry. __ push(ebx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION); __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ cmp(eax, Factory::null_value()); __ test(eax, Operand(eax));
__ j(equal, loop_statement.continue_target()); __ j(equal, loop_statement.continue_target());
__ mov(ebx, Operand(eax)); __ mov(ebx, Operand(eax));
@ -2054,6 +2052,25 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
} }
void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForValue(args->at(0), kAccumulator);
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
// Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
// used in a few functions in runtime.js which should not normally be hit by
// this compiler.
__ jmp(if_false);
Apply(context_, if_true, if_false);
}
void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) { void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1); ASSERT(args->length() == 1);

66
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -373,7 +373,13 @@ void MacroAssembler::AbortIfNotNumber(Register object) {
void MacroAssembler::AbortIfNotSmi(Register object) { void MacroAssembler::AbortIfNotSmi(Register object) {
test(object, Immediate(kSmiTagMask)); test(object, Immediate(kSmiTagMask));
Assert(equal, "Operand not a smi"); Assert(equal, "Operand is not a smi");
}
void MacroAssembler::AbortIfSmi(Register object) {
test(object, Immediate(kSmiTagMask));
Assert(not_equal, "Operand is a smi");
} }
@ -1292,7 +1298,7 @@ void MacroAssembler::InvokeFunction(Register fun,
mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
SmiUntag(ebx); SmiUntag(ebx);
mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
lea(edx, FieldOperand(edx, Code::kHeaderSize)); lea(edx, FieldOperand(edx, Code::kHeaderSize));
ParameterCount expected(ebx); ParameterCount expected(ebx);
@ -1344,8 +1350,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
// Make sure the code objects in the builtins object and in the // Make sure the code objects in the builtins object and in the
// builtin function are the same. // builtin function are the same.
push(target); push(target);
mov(target, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); mov(target, FieldOperand(edi, JSFunction::kCodeOffset));
mov(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
cmp(target, Operand(esp, 0)); cmp(target, Operand(esp, 0));
Assert(equal, "Builtin code object changed"); Assert(equal, "Builtin code object changed");
pop(target); pop(target);
@ -1510,6 +1515,59 @@ void MacroAssembler::Abort(const char* msg) {
} }
void MacroAssembler::JumpIfNotNumber(Register reg,
TypeInfo info,
Label* on_not_number) {
if (FLAG_debug_code) AbortIfSmi(reg);
if (!info.IsNumber()) {
cmp(FieldOperand(reg, HeapObject::kMapOffset),
Factory::heap_number_map());
j(not_equal, on_not_number);
}
}
void MacroAssembler::ConvertToInt32(Register dst,
Register source,
Register scratch,
TypeInfo info,
Label* on_not_int32) {
if (FLAG_debug_code) {
AbortIfSmi(source);
AbortIfNotNumber(source);
}
if (info.IsInteger32()) {
cvttsd2si(dst, FieldOperand(source, HeapNumber::kValueOffset));
} else {
Label done;
bool push_pop = (scratch.is(no_reg) && dst.is(source));
ASSERT(!scratch.is(source));
if (push_pop) {
push(dst);
scratch = dst;
}
if (scratch.is(no_reg)) scratch = dst;
cvttsd2si(scratch, FieldOperand(source, HeapNumber::kValueOffset));
cmp(scratch, 0x80000000u);
if (push_pop) {
j(not_equal, &done);
pop(dst);
jmp(on_not_int32);
} else {
j(equal, on_not_int32);
}
bind(&done);
if (push_pop) {
add(Operand(esp), Immediate(kPointerSize)); // Pop.
}
if (!scratch.is(dst)) {
mov(dst, scratch);
}
}
}
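ConvertToInt32 above leans on cvttsd2si returning the "integer indefinite" value 0x80000000 when the double is NaN or outside int32 range, so the result is compared against 0x80000000 to detect failure; as the header comment notes, the one legitimate value with that encoding, the minimum int32, is rejected as well. A small value-level sketch of the same check using the SSE2 intrinsic (the helper name is illustrative):

#include <emmintrin.h>  // SSE2: _mm_cvttsd_si32
#include <cstdint>
#include <cstdio>

// Returns true and stores the truncated value if d fits in int32.
// 0x80000000 doubles as "conversion failed" and as the real INT32_MIN,
// so that one in-range value is rejected too, matching ConvertToInt32.
bool TruncateToInt32(double d, int32_t* out) {
  int32_t v = _mm_cvttsd_si32(_mm_set_sd(d));  // truncating convert
  if (v == static_cast<int32_t>(0x80000000u)) return false;
  *out = v;
  return true;
}

int main() {
  int32_t v;
  std::printf("%d\n", TruncateToInt32(3.9, &v) ? v : -1);          // 3
  std::printf("%d\n", TruncateToInt32(4e9, &v) ? 1 : 0);           // 0: overflow
  std::printf("%d\n", TruncateToInt32(-2147483648.0, &v) ? 1 : 0); // 0: sentinel
  return 0;
}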
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii( void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type, Register instance_type,
Register scratch, Register scratch,

33
deps/v8/src/ia32/macro-assembler-ia32.h

@ -29,6 +29,7 @@
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_ #define V8_IA32_MACRO_ASSEMBLER_IA32_H_
#include "assembler.h" #include "assembler.h"
#include "type-info.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -225,12 +226,44 @@ class MacroAssembler: public Assembler {
sar(reg, kSmiTagSize); sar(reg, kSmiTagSize);
} }
// Modifies the register even if it does not contain a Smi!
void SmiUntag(Register reg, TypeInfo info, Label* non_smi) {
ASSERT(kSmiTagSize == 1);
sar(reg, kSmiTagSize);
if (info.IsSmi()) {
ASSERT(kSmiTag == 0);
j(carry, non_smi);
}
}
// Modifies the register even if it does not contain a Smi!
void SmiUntag(Register reg, Label* is_smi) {
ASSERT(kSmiTagSize == 1);
sar(reg, kSmiTagSize);
ASSERT(kSmiTag == 0);
j(not_carry, is_smi);
}
// Assumes input is a heap object.
void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
// Assumes input is a heap number. Jumps on things out of range. Also jumps
// on the min negative int32. Ignores fractional parts.
void ConvertToInt32(Register dst,
Register src, // Can be the same as dst.
Register scratch, // Can be no_reg or dst, but not src.
TypeInfo info,
Label* on_not_int32);
// Abort execution if argument is not a number. Used in debug code. // Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object); void AbortIfNotNumber(Register object);
// Abort execution if argument is not a smi. Used in debug code. // Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object); void AbortIfNotSmi(Register object);
// Abort execution if argument is a smi. Used in debug code.
void AbortIfSmi(Register object);
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Exception handling // Exception handling
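The SmiUntag overloads added above exploit the ia32 tagging scheme implied by kSmiTag == 0 and kSmiTagSize == 1: a smi is its value shifted left by one, heap object pointers carry a set low bit, and sar reg, 1 both untags and shifts the old tag bit into the carry flag for the following branch. A plain C++ sketch of that encoding; the names and the fake heap pointer are illustrative, not V8's.

#include <cassert>
#include <cstdint>

// Illustrative 32-bit tagging: smis are value << 1, heap object
// references have the low bit set.
const uint32_t kSmiTagMask = 1;

uint32_t TagSmi(int32_t value)     { return static_cast<uint32_t>(value) << 1; }
int32_t  UntagSmi(uint32_t tagged) { return static_cast<int32_t>(tagged) >> 1; }
bool     IsSmi(uint32_t tagged)    { return (tagged & kSmiTagMask) == 0; }

int main() {
  uint32_t t = TagSmi(-21);
  assert(IsSmi(t) && UntagSmi(t) == -21);
  uint32_t heap_ref = 0x08001235;  // fake pointer: low bit set
  assert(!IsSmi(heap_ref));        // sar would set the carry flag here
  return 0;
}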

32
deps/v8/src/ia32/stub-cache-ia32.cc

@ -1255,30 +1255,6 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
} }
// TODO(1241006): Avoid having lazy compile stubs specialized by the
// number of arguments. It is not needed anymore.
Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
// Enter an internal frame.
__ EnterInternalFrame();
// Push a copy of the function onto the stack.
__ push(edi);
__ push(edi); // function is also the parameter to the runtime call
__ CallRuntime(Runtime::kLazyCompile, 1);
__ pop(edi);
// Tear down temporary frame.
__ LeaveInternalFrame();
// Do a tail-call of the compiled function.
__ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
__ jmp(Operand(ecx));
return GetCodeWithFlags(flags, "LazyCompileStub");
}
void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) { void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) { if (kind_ == Code::KEYED_CALL_IC) {
__ cmp(Operand(ecx), Immediate(Handle<String>(name))); __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
@ -1595,6 +1571,9 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
// -- esp[(argc + 1) * 4] : receiver // -- esp[(argc + 1) * 4] : receiver
// ----------------------------------- // -----------------------------------
// If object is not a string, bail out to regular call.
if (!object->IsString()) return Heap::undefined_value();
const int argc = arguments().immediate(); const int argc = arguments().immediate();
Label miss; Label miss;
@ -1605,6 +1584,7 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(masm(), GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX, Context::STRING_FUNCTION_INDEX,
eax); eax);
ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder, CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, edi, name, &miss); ebx, edx, edi, name, &miss);
@ -1659,6 +1639,9 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
// -- esp[(argc + 1) * 4] : receiver // -- esp[(argc + 1) * 4] : receiver
// ----------------------------------- // -----------------------------------
// If object is not a string, bail out to regular call.
if (!object->IsString()) return Heap::undefined_value();
const int argc = arguments().immediate(); const int argc = arguments().immediate();
Label miss; Label miss;
@ -1670,6 +1653,7 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
GenerateDirectLoadGlobalFunctionPrototype(masm(), GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX, Context::STRING_FUNCTION_INDEX,
eax); eax);
ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder, CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, edi, name, &miss); ebx, edx, edi, name, &miss);

16
deps/v8/src/ia32/virtual-frame-ia32.h

@ -139,6 +139,22 @@ class VirtualFrame: public ZoneObject {
if (is_used(reg)) SpillElementAt(register_location(reg)); if (is_used(reg)) SpillElementAt(register_location(reg));
} }
// Make the two registers distinct and spill them. Returns the second
// register. If the registers were not distinct then it returns the new
// second register.
Result MakeDistinctAndSpilled(Result* left, Result* right) {
Spill(left->reg());
Spill(right->reg());
if (left->reg().is(right->reg())) {
RegisterAllocator* allocator = cgen()->allocator();
Result fresh = allocator->Allocate();
ASSERT(fresh.is_valid());
masm()->mov(fresh.reg(), right->reg());
return fresh;
}
return *right;
}
// Spill all occurrences of an arbitrary register if possible. Return the // Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register // register spilled or no_reg if it was not possible to free any register
// (ie, they all have frame-external references). // (ie, they all have frame-external references).

7
deps/v8/src/list-inl.h

@ -126,6 +126,13 @@ void List<T, P>::Iterate(void (*callback)(T* x)) {
} }
template<typename T, class P>
template<class Visitor>
void List<T, P>::Iterate(Visitor* visitor) {
for (int i = 0; i < length_; i++) visitor->Apply(&data_[i]);
}
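The templated Iterate above simply calls visitor->Apply(&element) on every slot, so any object with a matching Apply method works as a visitor and may mutate elements in place. A minimal sketch, with a std::vector standing in for V8's List:

#include <cstdio>
#include <vector>

// Stand-in for List<T>::Iterate(Visitor*): call Apply on each element slot.
template <typename T, class Visitor>
void Iterate(std::vector<T>* list, Visitor* visitor) {
  for (size_t i = 0; i < list->size(); i++) visitor->Apply(&(*list)[i]);
}

struct Doubler {
  void Apply(int* slot) { *slot *= 2; }  // visitors may mutate in place
};

int main() {
  std::vector<int> xs = {1, 2, 3};
  Doubler d;
  Iterate(&xs, &d);
  for (int x : xs) std::printf("%d ", x);  // prints: 2 4 6
  std::printf("\n");
  return 0;
}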
template<typename T, class P> template<typename T, class P>
bool List<T, P>::Contains(const T& elm) { bool List<T, P>::Contains(const T& elm) {
for (int i = 0; i < length_; i++) { for (int i = 0; i < length_; i++) {

2
deps/v8/src/list.h

@ -117,6 +117,8 @@ class List {
// Iterate through all list entries, starting at index 0. // Iterate through all list entries, starting at index 0.
void Iterate(void (*callback)(T* x)); void Iterate(void (*callback)(T* x));
template<class Visitor>
void Iterate(Visitor* visitor);
// Sort all list entries (using QuickSort) // Sort all list entries (using QuickSort)
void Sort(int (*cmp)(const T* x, const T* y)); void Sort(int (*cmp)(const T* x, const T* y));

7
deps/v8/src/liveedit-debugger.js

@ -800,9 +800,10 @@ Debug.LiveEdit = new function() {
this.end_position = raw_array[2]; this.end_position = raw_array[2];
this.param_num = raw_array[3]; this.param_num = raw_array[3];
this.code = raw_array[4]; this.code = raw_array[4];
this.scope_info = raw_array[5]; this.code_scope_info = raw_array[5];
this.outer_index = raw_array[6]; this.scope_info = raw_array[6];
this.shared_function_info = raw_array[7]; this.outer_index = raw_array[7];
this.shared_function_info = raw_array[8];
this.next_sibling_index = null; this.next_sibling_index = null;
this.raw_array = raw_array; this.raw_array = raw_array;
} }

42
deps/v8/src/liveedit.cc

@ -32,6 +32,7 @@
#include "compiler.h" #include "compiler.h"
#include "oprofile-agent.h" #include "oprofile-agent.h"
#include "scopes.h" #include "scopes.h"
#include "scopeinfo.h"
#include "global-handles.h" #include "global-handles.h"
#include "debug.h" #include "debug.h"
#include "memory.h" #include "memory.h"
@ -500,12 +501,16 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
this->SetSmiValueField(kParamNumOffset_, param_num); this->SetSmiValueField(kParamNumOffset_, param_num);
this->SetSmiValueField(kParentIndexOffset_, parent_index); this->SetSmiValueField(kParentIndexOffset_, parent_index);
} }
void SetFunctionCode(Handle<Code> function_code) { void SetFunctionCode(Handle<Code> function_code,
Handle<JSValue> wrapper = WrapInJSValue(*function_code); Handle<Object> code_scope_info) {
this->SetField(kCodeOffset_, wrapper); Handle<JSValue> code_wrapper = WrapInJSValue(*function_code);
this->SetField(kCodeOffset_, code_wrapper);
Handle<JSValue> scope_wrapper = WrapInJSValue(*code_scope_info);
this->SetField(kCodeScopeInfoOffset_, scope_wrapper);
} }
void SetScopeInfo(Handle<Object> scope_info_array) { void SetOuterScopeInfo(Handle<Object> scope_info_array) {
this->SetField(kScopeInfoOffset_, scope_info_array); this->SetField(kOuterScopeInfoOffset_, scope_info_array);
} }
void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) { void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) {
Handle<JSValue> info_holder = WrapInJSValue(*info); Handle<JSValue> info_holder = WrapInJSValue(*info);
@ -519,6 +524,11 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
JSValue::cast(this->GetField(kCodeOffset_)))); JSValue::cast(this->GetField(kCodeOffset_))));
return Handle<Code>::cast(raw_result); return Handle<Code>::cast(raw_result);
} }
Handle<Object> GetCodeScopeInfo() {
Handle<Object> raw_result = UnwrapJSValue(Handle<JSValue>(
JSValue::cast(this->GetField(kCodeScopeInfoOffset_))));
return raw_result;
}
int GetStartPosition() { int GetStartPosition() {
return this->GetSmiValueField(kStartPositionOffset_); return this->GetSmiValueField(kStartPositionOffset_);
} }
@ -532,10 +542,11 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
static const int kEndPositionOffset_ = 2; static const int kEndPositionOffset_ = 2;
static const int kParamNumOffset_ = 3; static const int kParamNumOffset_ = 3;
static const int kCodeOffset_ = 4; static const int kCodeOffset_ = 4;
static const int kScopeInfoOffset_ = 5; static const int kCodeScopeInfoOffset_ = 5;
static const int kParentIndexOffset_ = 6; static const int kOuterScopeInfoOffset_ = 6;
static const int kSharedFunctionInfoOffset_ = 7; static const int kParentIndexOffset_ = 7;
static const int kSize_ = 8; static const int kSharedFunctionInfoOffset_ = 8;
static const int kSize_ = 9;
friend class JSArrayBasedStruct<FunctionInfoWrapper>; friend class JSArrayBasedStruct<FunctionInfoWrapper>;
}; };
@ -671,7 +682,7 @@ class FunctionInfoListener {
void FunctionCode(Handle<Code> function_code) { void FunctionCode(Handle<Code> function_code) {
FunctionInfoWrapper info = FunctionInfoWrapper info =
FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_)); FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
info.SetFunctionCode(function_code); info.SetFunctionCode(function_code, Handle<Object>(Heap::null_value()));
} }
// Saves full information about a function: its code, its scope info // Saves full information about a function: its code, its scope info
@ -682,11 +693,12 @@ class FunctionInfoListener {
} }
FunctionInfoWrapper info = FunctionInfoWrapper info =
FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_)); FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
info.SetFunctionCode(Handle<Code>(shared->code())); info.SetFunctionCode(Handle<Code>(shared->code()),
Handle<Object>(shared->scope_info()));
info.SetSharedFunctionInfo(shared); info.SetSharedFunctionInfo(shared);
Handle<Object> scope_info_list(SerializeFunctionScope(scope)); Handle<Object> scope_info_list(SerializeFunctionScope(scope));
info.SetScopeInfo(scope_info_list); info.SetOuterScopeInfo(scope_info_list);
} }
Handle<JSArray> GetResult() { Handle<JSArray> GetResult() {
@ -855,6 +867,10 @@ Object* LiveEdit::ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
if (IsJSFunctionCode(shared_info->code())) { if (IsJSFunctionCode(shared_info->code())) {
ReplaceCodeObject(shared_info->code(), ReplaceCodeObject(shared_info->code(),
*(compile_info_wrapper.GetFunctionCode())); *(compile_info_wrapper.GetFunctionCode()));
Handle<Object> code_scope_info = compile_info_wrapper.GetCodeScopeInfo();
if (code_scope_info->IsFixedArray()) {
shared_info->set_scope_info(SerializedScopeInfo::cast(*code_scope_info));
}
} }
if (shared_info->debug_info()->IsDebugInfo()) { if (shared_info->debug_info()->IsDebugInfo()) {
@ -1190,7 +1206,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
int bottom_js_frame_index, int bottom_js_frame_index,
Debug::FrameDropMode* mode, Debug::FrameDropMode* mode,
Object*** restarter_frame_function_pointer) { Object*** restarter_frame_function_pointer) {
if (Debug::kFrameDropperFrameSize < 0) { if (!Debug::kFrameDropperSupported) {
return "Stack manipulations are not supported in this architecture."; return "Stack manipulations are not supported in this architecture.";
} }

256
deps/v8/src/mark-compact.cc

@ -32,6 +32,7 @@
#include "global-handles.h" #include "global-handles.h"
#include "ic-inl.h" #include "ic-inl.h"
#include "mark-compact.h" #include "mark-compact.h"
#include "objects-visiting.h"
#include "stub-cache.h" #include "stub-cache.h"
namespace v8 { namespace v8 {
@ -63,6 +64,7 @@ int MarkCompactCollector::live_cell_objects_size_ = 0;
int MarkCompactCollector::live_lo_objects_size_ = 0; int MarkCompactCollector::live_lo_objects_size_ = 0;
#endif #endif
void MarkCompactCollector::CollectGarbage() { void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will // Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed. // update the state as they proceed.
@ -244,14 +246,72 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
} }
// Helper class for marking pointers in HeapObjects. class StaticMarkingVisitor : public StaticVisitorBase {
class MarkingVisitor : public ObjectVisitor {
public: public:
void VisitPointer(Object** p) { static inline void IterateBody(Map* map, HeapObject* obj) {
table_.GetVisitor(map)(map, obj);
}
static void Initialize() {
table_.Register(kVisitShortcutCandidate,
&FixedBodyVisitor<StaticMarkingVisitor,
ConsString::BodyDescriptor,
void>::Visit);
table_.Register(kVisitConsString,
&FixedBodyVisitor<StaticMarkingVisitor,
ConsString::BodyDescriptor,
void>::Visit);
table_.Register(kVisitFixedArray,
&FlexibleBodyVisitor<StaticMarkingVisitor,
FixedArray::BodyDescriptor,
void>::Visit);
table_.Register(kVisitSharedFunctionInfo,
&FixedBodyVisitor<StaticMarkingVisitor,
SharedFunctionInfo::BodyDescriptor,
void>::Visit);
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
table_.Register(kVisitOddball,
&FixedBodyVisitor<StaticMarkingVisitor,
Oddball::BodyDescriptor,
void>::Visit);
table_.Register(kVisitMap,
&FixedBodyVisitor<StaticMarkingVisitor,
Map::BodyDescriptor,
void>::Visit);
table_.Register(kVisitCode, &VisitCode);
table_.Register(kVisitPropertyCell,
&FixedBodyVisitor<StaticMarkingVisitor,
JSGlobalPropertyCell::BodyDescriptor,
void>::Visit);
table_.RegisterSpecializations<DataObjectVisitor,
kVisitDataObject,
kVisitDataObjectGeneric>();
table_.RegisterSpecializations<JSObjectVisitor,
kVisitJSObject,
kVisitJSObjectGeneric>();
table_.RegisterSpecializations<StructObjectVisitor,
kVisitStruct,
kVisitStructGeneric>();
}
INLINE(static void VisitPointer(Object** p)) {
MarkObjectByPointer(p); MarkObjectByPointer(p);
} }
void VisitPointers(Object** start, Object** end) { INLINE(static void VisitPointers(Object** start, Object** end)) {
// Mark all objects pointed to in [start, end). // Mark all objects pointed to in [start, end).
const int kMinRangeForMarkingRecursion = 64; const int kMinRangeForMarkingRecursion = 64;
if (end - start >= kMinRangeForMarkingRecursion) { if (end - start >= kMinRangeForMarkingRecursion) {
@ -261,7 +321,7 @@ class MarkingVisitor : public ObjectVisitor {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p); for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
} }
void VisitCodeTarget(RelocInfo* rinfo) { static inline void VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address()); Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) { if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
@ -273,7 +333,7 @@ class MarkingVisitor : public ObjectVisitor {
} }
} }
void VisitDebugTarget(RelocInfo* rinfo) { static inline void VisitDebugTarget(RelocInfo* rinfo) {
ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
rinfo->IsPatchedReturnSequence()) || rinfo->IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
@ -282,19 +342,15 @@ class MarkingVisitor : public ObjectVisitor {
MarkCompactCollector::MarkObject(code); MarkCompactCollector::MarkObject(code);
} }
private:
// Mark object pointed to by p. // Mark object pointed to by p.
void MarkObjectByPointer(Object** p) { INLINE(static void MarkObjectByPointer(Object** p)) {
if (!(*p)->IsHeapObject()) return; if (!(*p)->IsHeapObject()) return;
HeapObject* object = ShortCircuitConsString(p); HeapObject* object = ShortCircuitConsString(p);
MarkCompactCollector::MarkObject(object); MarkCompactCollector::MarkObject(object);
} }
// Tells whether the mark sweep collection will perform compaction.
bool IsCompacting() { return MarkCompactCollector::IsCompacting(); }
// Visit an unmarked object. // Visit an unmarked object.
void VisitUnmarkedObject(HeapObject* obj) { static inline void VisitUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG #ifdef DEBUG
ASSERT(Heap::Contains(obj)); ASSERT(Heap::Contains(obj));
ASSERT(!obj->IsMarked()); ASSERT(!obj->IsMarked());
@ -303,12 +359,12 @@ class MarkingVisitor : public ObjectVisitor {
MarkCompactCollector::SetMark(obj); MarkCompactCollector::SetMark(obj);
// Mark the map pointer and the body. // Mark the map pointer and the body.
MarkCompactCollector::MarkObject(map); MarkCompactCollector::MarkObject(map);
obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), this); IterateBody(map, obj);
} }
// Visit all unmarked objects pointed to by [start, end). // Visit all unmarked objects pointed to by [start, end).
// Returns false if the operation fails (lack of stack space). // Returns false if the operation fails (lack of stack space).
inline bool VisitUnmarkedObjects(Object** start, Object** end) { static inline bool VisitUnmarkedObjects(Object** start, Object** end) {
// Return false if we are close to the stack limit. // Return false if we are close to the stack limit.
StackLimitCheck check; StackLimitCheck check;
if (check.HasOverflowed()) return false; if (check.HasOverflowed()) return false;
@ -322,6 +378,60 @@ class MarkingVisitor : public ObjectVisitor {
} }
return true; return true;
} }
static inline void VisitExternalReference(Address* p) { }
static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
private:
class DataObjectVisitor {
public:
template<int size>
static void VisitSpecialized(Map* map, HeapObject* object) {
}
static void Visit(Map* map, HeapObject* object) {
}
};
typedef FlexibleBodyVisitor<StaticMarkingVisitor,
JSObject::BodyDescriptor,
void> JSObjectVisitor;
typedef FlexibleBodyVisitor<StaticMarkingVisitor,
StructBodyDescriptor,
void> StructObjectVisitor;
static void VisitCode(Map* map, HeapObject* object) {
reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>();
}
typedef void (*Callback)(Map* map, HeapObject* object);
static VisitorDispatchTable<Callback> table_;
};
VisitorDispatchTable<StaticMarkingVisitor::Callback>
StaticMarkingVisitor::table_;
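StaticMarkingVisitor above replaces the virtual ObjectVisitor dispatch with a table of plain function pointers indexed by a per-map visitor id, so marking an object's body is one indexed call rather than a virtual call per object. A compact sketch of that dispatch-table pattern; the ids, object type, and callbacks below are illustrative, not V8's.

#include <cstdio>

// Illustrative visitor ids; V8 derives the real id from the map.
enum VisitorId { kVisitData, kVisitArray, kVisitorIdCount };

struct Object { VisitorId id; int payload; };

typedef void (*Callback)(Object* obj);

class DispatchTable {
 public:
  void Register(VisitorId id, Callback cb) { callbacks_[id] = cb; }
  Callback GetVisitor(const Object* obj) const { return callbacks_[obj->id]; }

 private:
  Callback callbacks_[kVisitorIdCount] = {nullptr, nullptr};
};

static void VisitData(Object* obj)  { std::printf("data %d\n", obj->payload); }
static void VisitArray(Object* obj) { std::printf("array %d\n", obj->payload); }

int main() {
  DispatchTable table;
  table.Register(kVisitData, &VisitData);
  table.Register(kVisitArray, &VisitArray);
  Object a = {kVisitArray, 3}, d = {kVisitData, 7};
  table.GetVisitor(&a)(&a);  // one indexed call, mirroring IterateBody()
  table.GetVisitor(&d)(&d);
  return 0;
}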
class MarkingVisitor : public ObjectVisitor {
public:
void VisitPointer(Object** p) {
StaticMarkingVisitor::VisitPointer(p);
}
void VisitPointers(Object** start, Object** end) {
StaticMarkingVisitor::VisitPointers(start, end);
}
void VisitCodeTarget(RelocInfo* rinfo) {
StaticMarkingVisitor::VisitCodeTarget(rinfo);
}
void VisitDebugTarget(RelocInfo* rinfo) {
StaticMarkingVisitor::VisitDebugTarget(rinfo);
}
}; };
@ -336,11 +446,7 @@ class RootMarkingVisitor : public ObjectVisitor {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p); for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
} }
MarkingVisitor* stack_visitor() { return &stack_visitor_; }
private: private:
MarkingVisitor stack_visitor_;
void MarkObjectByPointer(Object** p) { void MarkObjectByPointer(Object** p) {
if (!(*p)->IsHeapObject()) return; if (!(*p)->IsHeapObject()) return;
@ -351,14 +457,14 @@ class RootMarkingVisitor : public ObjectVisitor {
Map* map = object->map(); Map* map = object->map();
// Mark the object. // Mark the object.
MarkCompactCollector::SetMark(object); MarkCompactCollector::SetMark(object);
// Mark the map pointer and body, and push them on the marking stack. // Mark the map pointer and body, and push them on the marking stack.
MarkCompactCollector::MarkObject(map); MarkCompactCollector::MarkObject(map);
object->IterateBody(map->instance_type(), object->SizeFromMap(map), StaticMarkingVisitor::IterateBody(map, object);
&stack_visitor_);
// Mark all the objects reachable from the map and body. May leave // Mark all the objects reachable from the map and body. May leave
// overflowed objects in the heap. // overflowed objects in the heap.
MarkCompactCollector::EmptyMarkingStack(&stack_visitor_); MarkCompactCollector::EmptyMarkingStack();
} }
}; };
@ -425,11 +531,12 @@ void MarkCompactCollector::MarkMapContents(Map* map) {
// Mark the Object* fields of the Map. // Mark the Object* fields of the Map.
// Since the descriptor array has been marked already, it is fine // Since the descriptor array has been marked already, it is fine
// that one of these fields contains a pointer to it. // that one of these fields contains a pointer to it.
MarkingVisitor visitor; // Has no state or contents. Object** start_slot = HeapObject::RawField(map,
visitor.VisitPointers(HeapObject::RawField(map, Map::kPointerFieldsBeginOffset);
Map::kPointerFieldsBeginOffset),
HeapObject::RawField(map, Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
Map::kPointerFieldsEndOffset));
StaticMarkingVisitor::VisitPointers(start_slot, end_slot);
} }
@ -447,10 +554,11 @@ void MarkCompactCollector::MarkDescriptorArray(
ASSERT(contents->IsFixedArray()); ASSERT(contents->IsFixedArray());
ASSERT(contents->length() >= 2); ASSERT(contents->length() >= 2);
SetMark(contents); SetMark(contents);
// Contents contains (value, details) pairs. If the details say // Contents contains (value, details) pairs. If the details say that
// that the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, // the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, or
// or NULL_DESCRIPTOR, we don't mark the value as live. Only for // NULL_DESCRIPTOR, we don't mark the value as live. Only for
// type MAP_TRANSITION is the value a Object* (a Map*). // MAP_TRANSITION and CONSTANT_TRANSITION is the value an Object* (a
// Map*).
for (int i = 0; i < contents->length(); i += 2) { for (int i = 0; i < contents->length(); i += 2) {
// If the pair (value, details) at index i, i+1 is not // If the pair (value, details) at index i, i+1 is not
// a transition or null descriptor, mark the value. // a transition or null descriptor, mark the value.
@ -529,7 +637,7 @@ void MarkCompactCollector::MarkSymbolTable() {
// Explicitly mark the prefix. // Explicitly mark the prefix.
MarkingVisitor marker; MarkingVisitor marker;
symbol_table->IteratePrefix(&marker); symbol_table->IteratePrefix(&marker);
ProcessMarkingStack(&marker); ProcessMarkingStack();
} }
@ -544,7 +652,7 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// There may be overflowed objects in the heap. Visit them now. // There may be overflowed objects in the heap. Visit them now.
while (marking_stack.overflowed()) { while (marking_stack.overflowed()) {
RefillMarkingStack(); RefillMarkingStack();
EmptyMarkingStack(visitor->stack_visitor()); EmptyMarkingStack();
} }
} }
@ -587,7 +695,7 @@ void MarkCompactCollector::MarkObjectGroups() {
// Before: the marking stack contains zero or more heap object pointers. // Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the // After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap. // marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingStack(MarkingVisitor* visitor) { void MarkCompactCollector::EmptyMarkingStack() {
while (!marking_stack.is_empty()) { while (!marking_stack.is_empty()) {
HeapObject* object = marking_stack.Pop(); HeapObject* object = marking_stack.Pop();
ASSERT(object->IsHeapObject()); ASSERT(object->IsHeapObject());
@ -601,8 +709,8 @@ void MarkCompactCollector::EmptyMarkingStack(MarkingVisitor* visitor) {
map_word.ClearMark(); map_word.ClearMark();
Map* map = map_word.ToMap(); Map* map = map_word.ToMap();
MarkObject(map); MarkObject(map);
object->IterateBody(map->instance_type(), object->SizeFromMap(map),
visitor); StaticMarkingVisitor::IterateBody(map, object);
} }
} }
@ -652,22 +760,22 @@ void MarkCompactCollector::RefillMarkingStack() {
// stack. Before: the marking stack contains zero or more heap object // stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed // pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap. // objects in the heap.
void MarkCompactCollector::ProcessMarkingStack(MarkingVisitor* visitor) { void MarkCompactCollector::ProcessMarkingStack() {
EmptyMarkingStack(visitor); EmptyMarkingStack();
while (marking_stack.overflowed()) { while (marking_stack.overflowed()) {
RefillMarkingStack(); RefillMarkingStack();
EmptyMarkingStack(visitor); EmptyMarkingStack();
} }
} }
void MarkCompactCollector::ProcessObjectGroups(MarkingVisitor* visitor) { void MarkCompactCollector::ProcessObjectGroups() {
bool work_to_do = true; bool work_to_do = true;
ASSERT(marking_stack.is_empty()); ASSERT(marking_stack.is_empty());
while (work_to_do) { while (work_to_do) {
MarkObjectGroups(); MarkObjectGroups();
work_to_do = !marking_stack.is_empty(); work_to_do = !marking_stack.is_empty();
ProcessMarkingStack(visitor); ProcessMarkingStack();
} }
} }
@ -692,7 +800,7 @@ void MarkCompactCollector::MarkLiveObjects() {
// objects are unmarked. Mark objects reachable from object groups // objects are unmarked. Mark objects reachable from object groups
// containing at least one marked object, and continue until no new // containing at least one marked object, and continue until no new
// objects are reachable from the object groups. // objects are reachable from the object groups.
ProcessObjectGroups(root_visitor.stack_visitor()); ProcessObjectGroups();
// The objects reachable from the roots or object groups are marked, // The objects reachable from the roots or object groups are marked,
// yet unreachable objects are unmarked. Mark objects reachable // yet unreachable objects are unmarked. Mark objects reachable
@ -705,12 +813,12 @@ void MarkCompactCollector::MarkLiveObjects() {
GlobalHandles::IterateWeakRoots(&root_visitor); GlobalHandles::IterateWeakRoots(&root_visitor);
while (marking_stack.overflowed()) { while (marking_stack.overflowed()) {
RefillMarkingStack(); RefillMarkingStack();
EmptyMarkingStack(root_visitor.stack_visitor()); EmptyMarkingStack();
} }
// Repeat the object groups to mark unmarked groups reachable from the // Repeat the object groups to mark unmarked groups reachable from the
// weak roots. // weak roots.
ProcessObjectGroups(root_visitor.stack_visitor()); ProcessObjectGroups();
// Prune the symbol table removing all symbols only pointed to by the // Prune the symbol table removing all symbols only pointed to by the
// symbol table. Cannot use symbol_table() here because the symbol // symbol table. Cannot use symbol_table() here because the symbol
@ -1091,16 +1199,35 @@ static void MigrateObject(Address dst,
} }
class StaticPointersToNewGenUpdatingVisitor : public
StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
public:
static inline void VisitPointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
HeapObject* obj = HeapObject::cast(*p);
Address old_addr = obj->address();
if (Heap::new_space()->Contains(obj)) {
ASSERT(Heap::InFromSpace(*p));
*p = HeapObject::FromAddress(Memory::Address_at(old_addr));
}
}
};
// Visitor for updating pointers from live objects in old spaces to new space. // Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects. // It does not expect to encounter pointers to dead objects.
class PointersToNewGenUpdatingVisitor: public ObjectVisitor { class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
public: public:
void VisitPointer(Object** p) { void VisitPointer(Object** p) {
UpdatePointer(p); StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
} }
void VisitPointers(Object** start, Object** end) { void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) UpdatePointer(p); for (Object** p = start; p < end; p++) {
StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
}
} }
void VisitCodeTarget(RelocInfo* rinfo) { void VisitCodeTarget(RelocInfo* rinfo) {
@ -1119,19 +1246,6 @@ class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
VisitPointer(&target); VisitPointer(&target);
rinfo->set_call_address(Code::cast(target)->instruction_start()); rinfo->set_call_address(Code::cast(target)->instruction_start());
} }
private:
void UpdatePointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
HeapObject* obj = HeapObject::cast(*p);
Address old_addr = obj->address();
if (Heap::new_space()->Contains(obj)) {
ASSERT(Heap::InFromSpace(*p));
*p = HeapObject::FromAddress(Memory::Address_at(old_addr));
}
}
}; };
@ -1248,15 +1362,12 @@ static void SweepNewSpace(NewSpace* space) {
PointersToNewGenUpdatingVisitor updating_visitor; PointersToNewGenUpdatingVisitor updating_visitor;
// Update pointers in to space. // Update pointers in to space.
HeapObject* object; Address current = space->bottom();
for (Address current = space->bottom(); while (current < space->top()) {
current < space->top(); HeapObject* object = HeapObject::FromAddress(current);
current += object->Size()) { current +=
object = HeapObject::FromAddress(current); StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
object);
object->IterateBody(object->map()->instance_type(),
object->Size(),
&updating_visitor);
} }
// Update roots. // Update roots.
@ -1758,7 +1869,9 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock); SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
SweepSpace(Heap::code_space(), &DeallocateCodeBlock); SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
SweepSpace(Heap::cell_space(), &DeallocateCellBlock); SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
SweepNewSpace(Heap::new_space()); { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
SweepNewSpace(Heap::new_space());
}
SweepSpace(Heap::map_space(), &DeallocateMapBlock); SweepSpace(Heap::map_space(), &DeallocateMapBlock);
Heap::IterateDirtyRegions(Heap::map_space(), Heap::IterateDirtyRegions(Heap::map_space(),
@ -2327,4 +2440,11 @@ void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#endif #endif
} }
void MarkCompactCollector::Initialize() {
StaticPointersToNewGenUpdatingVisitor::Initialize();
StaticMarkingVisitor::Initialize();
}
} } // namespace v8::internal } } // namespace v8::internal

10
deps/v8/src/mark-compact.h

@ -86,6 +86,9 @@ class MarkCompactCollector: public AllStatic {
force_compaction_ = value; force_compaction_ = value;
} }
static void Initialize();
// Prepares for GC by resetting relocation info in old and map spaces and // Prepares for GC by resetting relocation info in old and map spaces and
// choosing spaces to compact. // choosing spaces to compact.
static void Prepare(GCTracer* tracer); static void Prepare(GCTracer* tracer);
@ -171,6 +174,7 @@ class MarkCompactCollector: public AllStatic {
friend class RootMarkingVisitor; friend class RootMarkingVisitor;
friend class MarkingVisitor; friend class MarkingVisitor;
friend class StaticMarkingVisitor;
// Marking operations for objects reachable from roots. // Marking operations for objects reachable from roots.
static void MarkLiveObjects(); static void MarkLiveObjects();
@ -214,17 +218,17 @@ class MarkCompactCollector: public AllStatic {
// Mark all objects in an object group with at least one marked // Mark all objects in an object group with at least one marked
// object, then all objects reachable from marked objects in object // object, then all objects reachable from marked objects in object
// groups, and repeat. // groups, and repeat.
static void ProcessObjectGroups(MarkingVisitor* visitor); static void ProcessObjectGroups();
// Mark objects reachable (transitively) from objects in the marking stack // Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap. // or overflowed in the heap.
static void ProcessMarkingStack(MarkingVisitor* visitor); static void ProcessMarkingStack();
// Mark objects reachable (transitively) from objects in the marking // Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave // stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's // overflowed objects in the heap, in which case the marking stack's
// overflow flag will be set. // overflow flag will be set.
static void EmptyMarkingStack(MarkingVisitor* visitor); static void EmptyMarkingStack();
// Refill the marking stack with overflowed objects from the heap. This // Refill the marking stack with overflowed objects from the heap. This
// function either leaves the marking stack full or clears the overflow // function either leaves the marking stack full or clears the overflow

12
deps/v8/src/messages.js

@ -707,14 +707,20 @@ CallSite.prototype.getMethodName = function () {
// See if we can find a unique property on the receiver that holds // See if we can find a unique property on the receiver that holds
// this function. // this function.
var ownName = this.fun.name; var ownName = this.fun.name;
if (ownName && this.receiver && this.receiver[ownName] === this.fun) if (ownName && this.receiver &&
(ObjectLookupGetter.call(this.receiver, ownName) === this.fun ||
ObjectLookupSetter.call(this.receiver, ownName) === this.fun ||
this.receiver[ownName] === this.fun)) {
// To handle DontEnum properties we guess that the method has // To handle DontEnum properties we guess that the method has
// the same name as the function. // the same name as the function.
return ownName; return ownName;
}
var name = null; var name = null;
for (var prop in this.receiver) { for (var prop in this.receiver) {
if (this.receiver[prop] === this.fun) { if (this.receiver.__lookupGetter__(prop) === this.fun ||
// If we find more than one match bail out to avoid confusion this.receiver.__lookupSetter__(prop) === this.fun ||
(!this.receiver.__lookupGetter__(prop) && this.receiver[prop] === this.fun)) {
// If we find more than one match bail out to avoid confusion.
if (name) if (name)
return null; return null;
name = prop; name = prop;

9
deps/v8/src/mips/debug-mips.cc

@ -114,15 +114,10 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on mips"); masm->Abort("LiveEdit frame dropping is not supported on mips");
} }
#undef __
const bool Debug::kFrameDropperSupported = false;
Object** Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame, #undef __
Handle<Code> code) {
UNREACHABLE();
return NULL;
}
const int Debug::kFrameDropperFrameSize = -1;
#endif // ENABLE_DEBUGGER_SUPPORT #endif // ENABLE_DEBUGGER_SUPPORT

7
deps/v8/src/mips/simulator-mips.cc

@ -606,7 +606,7 @@ void Simulator::set_fpu_register(int fpureg, int32_t value) {
void Simulator::set_fpu_register_double(int fpureg, double value) { void Simulator::set_fpu_register_double(int fpureg, double value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0)); ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
*v8i::BitCast<double*, int32_t*>(&FPUregisters_[fpureg]) = value; *v8i::BitCast<double*>(&FPUregisters_[fpureg]) = value;
} }
@ -627,8 +627,7 @@ int32_t Simulator::get_fpu_register(int fpureg) const {
double Simulator::get_fpu_register_double(int fpureg) const { double Simulator::get_fpu_register_double(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0)); ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
return *v8i::BitCast<double*, int32_t*>( return *v8i::BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
const_cast<int32_t*>(&FPUregisters_[fpureg]));
} }
// Raw access to the PC register. // Raw access to the PC register.
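The simulator changes above move BitCast to a single explicit template argument; the helper only reinterprets one type's bytes as another, and here it is applied to pointers so that a pair of 32-bit FPU register slots can be read and written as one double in place. A value-level sketch of the same bit reinterpretation, assuming a memcpy-based implementation rather than quoting V8's:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reinterpret the object representation of `from` as a Dest. Sketch of a
// BitCast-style helper; requires the two types to have the same size.
template <class Dest, class Source>
Dest bit_cast(const Source& from) {
  static_assert(sizeof(Dest) == sizeof(Source), "size mismatch");
  Dest to;
  std::memcpy(&to, &from, sizeof(to));
  return to;
}

int main() {
  double d = 1.0;
  uint64_t bits = bit_cast<uint64_t>(d);
  std::printf("%016llx\n", static_cast<unsigned long long>(bits));  // 3ff0...
  return 0;
}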
@ -903,7 +902,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
break; break;
case MFHC1: case MFHC1:
fp_out = get_fpu_register_double(fs_reg); fp_out = get_fpu_register_double(fs_reg);
alu_out = *v8i::BitCast<int32_t*, double*>(&fp_out); alu_out = *v8i::BitCast<int32_t*>(&fp_out);
break; break;
case MTC1: case MTC1:
case MTHC1: case MTHC1:

112
deps/v8/src/objects-inl.h

@ -1493,6 +1493,16 @@ int DescriptorArray::Search(String* name) {
} }
int DescriptorArray::SearchWithCache(String* name) {
int number = DescriptorLookupCache::Lookup(this, name);
if (number == DescriptorLookupCache::kAbsent) {
number = Search(name);
DescriptorLookupCache::Update(this, name, number);
}
return number;
}
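SearchWithCache above asks DescriptorLookupCache first and only falls back to the linear Search when the cache answers kAbsent, storing whatever Search returns for next time. A small memoized-lookup sketch of that shape; the std::map cache and the kAbsent value are illustrative stand-ins for V8's fixed-size DescriptorLookupCache.

#include <cstdio>
#include <map>
#include <string>
#include <vector>

const int kAbsent = -1;  // illustrative "not cached / not found" marker

// Stand-in for the slow path: linear scan, like DescriptorArray::Search.
int Search(const std::vector<std::string>& keys, const std::string& name) {
  for (size_t i = 0; i < keys.size(); i++)
    if (keys[i] == name) return static_cast<int>(i);
  return kAbsent;
}

// Stand-in for the cached path: remember name -> index across lookups.
int SearchWithCache(const std::vector<std::string>& keys,
                    const std::string& name,
                    std::map<std::string, int>* cache) {
  auto it = cache->find(name);
  if (it != cache->end()) return it->second;  // cache hit
  int number = Search(keys, name);            // slow path
  (*cache)[name] = number;                    // update cache, even if absent
  return number;
}

int main() {
  std::vector<std::string> keys = {"x", "y", "length"};
  std::map<std::string, int> cache;
  std::printf("%d\n", SearchWithCache(keys, "length", &cache));  // 2 (miss)
  std::printf("%d\n", SearchWithCache(keys, "length", &cache));  // 2 (hit)
  std::printf("%d\n", SearchWithCache(keys, "nope", &cache));    // -1
  return 0;
}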
String* DescriptorArray::GetKey(int descriptor_number) { String* DescriptorArray::GetKey(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors()); ASSERT(descriptor_number < number_of_descriptors());
return String::cast(get(ToKeyIndex(descriptor_number))); return String::cast(get(ToKeyIndex(descriptor_number)));
@ -2060,21 +2070,8 @@ void ExternalFloatArray::set(int index, float value) {
ptr[index] = value; ptr[index] = value;
} }
inline Scavenger Map::scavenger() {
Scavenger callback = reinterpret_cast<Scavenger>(
READ_INTPTR_FIELD(this, kScavengerCallbackOffset));
ASSERT(callback == Heap::GetScavenger(instance_type(),
instance_size()));
return callback;
}
inline void Map::set_scavenger(Scavenger callback) { INT_ACCESSORS(Map, visitor_id, kScavengerCallbackOffset)
WRITE_INTPTR_FIELD(this,
kScavengerCallbackOffset,
reinterpret_cast<intptr_t>(callback));
}
int Map::instance_size() { int Map::instance_size() {
return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2; return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
@ -2099,7 +2096,7 @@ int HeapObject::SizeFromMap(Map* map) {
(kStringTag | kConsStringTag) || (kStringTag | kConsStringTag) ||
instance_type == JS_ARRAY_TYPE) return map->instance_size(); instance_type == JS_ARRAY_TYPE) return map->instance_size();
if (instance_type == FIXED_ARRAY_TYPE) { if (instance_type == FIXED_ARRAY_TYPE) {
return reinterpret_cast<FixedArray*>(this)->FixedArraySize(); return FixedArray::BodyDescriptor::SizeOf(map, this);
} }
if (instance_type == BYTE_ARRAY_TYPE) { if (instance_type == BYTE_ARRAY_TYPE) {
return reinterpret_cast<ByteArray*>(this)->ByteArraySize(); return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
@ -2661,8 +2658,7 @@ void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
bool SharedFunctionInfo::is_compiled() { bool SharedFunctionInfo::is_compiled() {
// TODO(1242782): Create a code kind for uncompiled code. return code() != Builtins::builtin(Builtins::LazyCompile);
return code()->kind() != Code::STUB;
} }
@ -2694,12 +2690,14 @@ bool JSFunction::IsBuiltin() {
Code* JSFunction::code() { Code* JSFunction::code() {
return shared()->code(); return Code::cast(READ_FIELD(this, kCodeOffset));
} }
void JSFunction::set_code(Code* value) { void JSFunction::set_code(Code* value) {
shared()->set_code(value); // Skip the write barrier because code is never in new space.
ASSERT(!Heap::InNewSpace(value));
WRITE_FIELD(this, kCodeOffset, value);
} }
@ -2771,7 +2769,7 @@ bool JSFunction::should_have_prototype() {
bool JSFunction::is_compiled() { bool JSFunction::is_compiled() {
return shared()->is_compiled(); return code() != Builtins::builtin(Builtins::LazyCompile);
} }
@ -2818,12 +2816,6 @@ void Proxy::set_proxy(Address value) {
} }
void Proxy::ProxyIterateBody(ObjectVisitor* visitor) {
visitor->VisitExternalReference(
reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
}
ACCESSORS(JSValue, value, Object, kValueOffset) ACCESSORS(JSValue, value, Object, kValueOffset)
@ -3307,6 +3299,74 @@ Object* FixedArray::Copy() {
} }
int JSObject::BodyDescriptor::SizeOf(Map* map, HeapObject* object) {
return map->instance_size();
}
void Proxy::ProxyIterateBody(ObjectVisitor* v) {
v->VisitExternalReference(
reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
}
template<typename StaticVisitor>
void Proxy::ProxyIterateBody() {
StaticVisitor::VisitExternalReference(
reinterpret_cast<Address *>(FIELD_ADDR(this, kProxyOffset)));
}
void ExternalAsciiString::ExternalAsciiStringIterateBody(ObjectVisitor* v) {
typedef v8::String::ExternalAsciiStringResource Resource;
v->VisitExternalAsciiString(
reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
template<typename StaticVisitor>
void ExternalAsciiString::ExternalAsciiStringIterateBody() {
typedef v8::String::ExternalAsciiStringResource Resource;
StaticVisitor::VisitExternalAsciiString(
reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
void ExternalTwoByteString::ExternalTwoByteStringIterateBody(ObjectVisitor* v) {
typedef v8::String::ExternalStringResource Resource;
v->VisitExternalTwoByteString(
reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
template<typename StaticVisitor>
void ExternalTwoByteString::ExternalTwoByteStringIterateBody() {
typedef v8::String::ExternalStringResource Resource;
StaticVisitor::VisitExternalTwoByteString(
reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
#define SLOT_ADDR(obj, offset) \
reinterpret_cast<Object**>((obj)->address() + offset)
template<int start_offset, int end_offset, int size>
void FixedBodyDescriptor<start_offset, end_offset, size>::IterateBody(
HeapObject* obj,
ObjectVisitor* v) {
v->VisitPointers(SLOT_ADDR(obj, start_offset), SLOT_ADDR(obj, end_offset));
}
template<int start_offset>
void FlexibleBodyDescriptor<start_offset>::IterateBody(HeapObject* obj,
int object_size,
ObjectVisitor* v) {
v->VisitPointers(SLOT_ADDR(obj, start_offset), SLOT_ADDR(obj, object_size));
}
#undef SLOT_ADDR
#undef CAST_ACCESSOR #undef CAST_ACCESSOR
#undef INT_ACCESSORS #undef INT_ACCESSORS
#undef SMI_ACCESSORS #undef SMI_ACCESSORS

139
deps/v8/src/objects-visiting.cc

@ -0,0 +1,139 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "ic-inl.h"
#include "objects-visiting.h"
namespace v8 {
namespace internal {
static inline bool IsShortcutCandidate(int type) {
return ((type & kShortcutTypeMask) == kShortcutTypeTag);
}
StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
int instance_type,
int instance_size) {
if (instance_type < FIRST_NONSTRING_TYPE) {
switch (instance_type & kStringRepresentationMask) {
case kSeqStringTag:
if ((instance_type & kStringEncodingMask) == kAsciiStringTag) {
return kVisitSeqAsciiString;
} else {
return kVisitSeqTwoByteString;
}
case kConsStringTag:
if (IsShortcutCandidate(instance_type)) {
return kVisitShortcutCandidate;
} else {
return kVisitConsString;
}
case kExternalStringTag:
return GetVisitorIdForSize(kVisitDataObject,
kVisitDataObjectGeneric,
ExternalString::kSize);
}
UNREACHABLE();
}
switch (instance_type) {
case BYTE_ARRAY_TYPE:
return kVisitByteArray;
case FIXED_ARRAY_TYPE:
return kVisitFixedArray;
case ODDBALL_TYPE:
return kVisitOddball;
case MAP_TYPE:
return kVisitMap;
case CODE_TYPE:
return kVisitCode;
case JS_GLOBAL_PROPERTY_CELL_TYPE:
return kVisitPropertyCell;
case SHARED_FUNCTION_INFO_TYPE:
return kVisitSharedFunctionInfo;
case PROXY_TYPE:
return GetVisitorIdForSize(kVisitDataObject,
kVisitDataObjectGeneric,
Proxy::kSize);
case FILLER_TYPE:
return kVisitDataObjectGeneric;
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_VALUE_TYPE:
case JS_ARRAY_TYPE:
case JS_REGEXP_TYPE:
case JS_FUNCTION_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
case JS_BUILTINS_OBJECT_TYPE:
return GetVisitorIdForSize(kVisitJSObject,
kVisitJSObjectGeneric,
instance_size);
case HEAP_NUMBER_TYPE:
case PIXEL_ARRAY_TYPE:
case EXTERNAL_BYTE_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
case EXTERNAL_SHORT_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
case EXTERNAL_INT_ARRAY_TYPE:
case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
case EXTERNAL_FLOAT_ARRAY_TYPE:
return GetVisitorIdForSize(kVisitDataObject,
kVisitDataObjectGeneric,
instance_size);
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
return GetVisitorIdForSize(kVisitStruct,
kVisitStructGeneric,
instance_size);
default:
UNREACHABLE();
return kVisitorIdCount;
}
}
} } // namespace v8::internal

382
deps/v8/src/objects-visiting.h

@ -0,0 +1,382 @@
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_OBJECTS_ITERATION_H_
#define V8_OBJECTS_ITERATION_H_
// This file provides base classes and auxiliary methods for defining
// static object visitors used during GC.
// Visiting a HeapObject body with a normal ObjectVisitor requires performing
// two switches on the object's instance type to determine object size and
// layout, plus one or more virtual method calls on the visitor itself.
// A static visitor is different: it provides a dispatch table containing
// pointers to specialized visit functions. Each map has a visitor_id field
// which holds the index of the specialized visitor to use.
namespace v8 {
namespace internal {
// Base class for all static visitors.
class StaticVisitorBase : public AllStatic {
public:
enum VisitorId {
kVisitSeqAsciiString = 0,
kVisitSeqTwoByteString,
kVisitShortcutCandidate,
kVisitByteArray,
kVisitFixedArray,
// For data objects, JS objects and structs we provide visitors specialized
// by object size in words, along with a generic visitor that can visit an
// object of any size.
// Ids of specialized visitors are declared in a linear order (without
// holes), starting from the id of the visitor specialized for 2-word objects
// (the base visitor id) and ending with the id of the generic visitor.
// GetVisitorIdForSize depends on this ordering to calculate the visitor id
// of a specialized visitor from a given instance size, the base visitor id
// and the generic visitor's id.
kVisitDataObject,
kVisitDataObject2 = kVisitDataObject,
kVisitDataObject3,
kVisitDataObject4,
kVisitDataObject5,
kVisitDataObject6,
kVisitDataObject7,
kVisitDataObject8,
kVisitDataObject9,
kVisitDataObjectGeneric,
kVisitJSObject,
kVisitJSObject2 = kVisitJSObject,
kVisitJSObject3,
kVisitJSObject4,
kVisitJSObject5,
kVisitJSObject6,
kVisitJSObject7,
kVisitJSObject8,
kVisitJSObject9,
kVisitJSObjectGeneric,
kVisitStruct,
kVisitStruct2 = kVisitStruct,
kVisitStruct3,
kVisitStruct4,
kVisitStruct5,
kVisitStruct6,
kVisitStruct7,
kVisitStruct8,
kVisitStruct9,
kVisitStructGeneric,
kVisitConsString,
kVisitOddball,
kVisitCode,
kVisitMap,
kVisitPropertyCell,
kVisitSharedFunctionInfo,
kVisitorIdCount,
kMinObjectSizeInWords = 2
};
// Determine which specialized visitor should be used for the given instance
// type and instance size.
static VisitorId GetVisitorId(int instance_type, int instance_size);
static VisitorId GetVisitorId(Map* map) {
return GetVisitorId(map->instance_type(), map->instance_size());
}
// For visitors that allow specialization by size, calculate the VisitorId
// based on the object size, the base visitor id and the generic visitor id.
static VisitorId GetVisitorIdForSize(VisitorId base,
VisitorId generic,
int object_size) {
ASSERT((base == kVisitDataObject) ||
(base == kVisitStruct) ||
(base == kVisitJSObject));
ASSERT(IsAligned(object_size, kPointerSize));
ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
ASSERT(object_size < Page::kMaxHeapObjectSize);
const VisitorId specialization = static_cast<VisitorId>(
base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
return Min(specialization, generic);
}
};
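Because the specialized ids are laid out contiguously from the 2-word visitor up to the generic one, GetVisitorIdForSize above is pure arithmetic. A standalone sketch of that calculation (plain ints stand in for VisitorId, and a 4-byte pointer size is assumed only to make the numbers concrete):

#include <algorithm>
#include <cstdio>

// Same formula as GetVisitorIdForSize: base + size_in_words - min_words,
// clamped to the generic id.
int VisitorIdForSize(int base, int generic, int object_size,
                     int pointer_size, int min_object_size_in_words) {
  int specialization =
      base + object_size / pointer_size - min_object_size_in_words;
  return std::min(specialization, generic);
}

int main() {
  // Ids consistent with the enum order above: kVisitDataObject2 is the sixth
  // id (5) and kVisitDataObjectGeneric is 13.
  const int kVisitDataObject = 5;
  const int kVisitDataObjectGeneric = 13;
  const int kPointerSize = 4;            // assume a 32-bit build
  const int kMinObjectSizeInWords = 2;
  std::printf("%d\n", VisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
                                       8, kPointerSize, kMinObjectSizeInWords));   // 2 words  -> 5
  std::printf("%d\n", VisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
                                       16, kPointerSize, kMinObjectSizeInWords));  // 4 words  -> 7
  std::printf("%d\n", VisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
                                       64, kPointerSize, kMinObjectSizeInWords));  // 16 words -> 13 (generic)
  return 0;
}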
template<typename Callback>
class VisitorDispatchTable {
public:
inline Callback GetVisitor(Map* map) {
return callbacks_[map->visitor_id()];
}
void Register(StaticVisitorBase::VisitorId id, Callback callback) {
ASSERT((0 <= id) && (id < StaticVisitorBase::kVisitorIdCount));
callbacks_[id] = callback;
}
template<typename Visitor,
StaticVisitorBase::VisitorId base,
StaticVisitorBase::VisitorId generic,
int object_size_in_words>
void RegisterSpecialization() {
static const int size = object_size_in_words * kPointerSize;
Register(StaticVisitorBase::GetVisitorIdForSize(base, generic, size),
&Visitor::template VisitSpecialized<size>);
}
template<typename Visitor,
StaticVisitorBase::VisitorId base,
StaticVisitorBase::VisitorId generic>
void RegisterSpecializations() {
STATIC_ASSERT(
(generic - base + StaticVisitorBase::kMinObjectSizeInWords) == 10);
RegisterSpecialization<Visitor, base, generic, 2>();
RegisterSpecialization<Visitor, base, generic, 3>();
RegisterSpecialization<Visitor, base, generic, 4>();
RegisterSpecialization<Visitor, base, generic, 5>();
RegisterSpecialization<Visitor, base, generic, 6>();
RegisterSpecialization<Visitor, base, generic, 7>();
RegisterSpecialization<Visitor, base, generic, 8>();
RegisterSpecialization<Visitor, base, generic, 9>();
Register(generic, &Visitor::Visit);
}
private:
Callback callbacks_[StaticVisitorBase::kVisitorIdCount];
};
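VisitorDispatchTable boils down to a fixed-size array of function pointers indexed by visitor id, filled once at startup; RegisterSpecialization simply instantiates a visit function per object size and stores it in the matching slot. A minimal standalone model of that idea (ids, callback type and visitors below are made up for illustration):

#include <cstdio>

enum Id { kVisitData2, kVisitData3, kVisitData4, kVisitDataGeneric, kIdCount };

typedef int (*Callback)(int instance_size);

static Callback table[kIdCount];

// One instantiation per known size; the size is a template constant, so the
// body needs no size lookup at all.
template <int size>
static int VisitSpecialized(int /*instance_size*/) { return size; }

static int VisitGeneric(int instance_size) { return instance_size; }

int main() {
  table[kVisitData2] = &VisitSpecialized<8>;   // 2 words on a 32-bit build
  table[kVisitData3] = &VisitSpecialized<12>;
  table[kVisitData4] = &VisitSpecialized<16>;
  table[kVisitDataGeneric] = &VisitGeneric;
  // Visiting an object is one indexed load plus an indirect call - no switch
  // over instance types and no virtual dispatch.
  std::printf("%d %d\n", table[kVisitData3](12), table[kVisitDataGeneric](40));
  return 0;
}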
template<typename StaticVisitor>
class BodyVisitorBase : public AllStatic {
public:
static inline void IteratePointers(HeapObject* object,
int start_offset,
int end_offset) {
Object** start_slot = reinterpret_cast<Object**>(object->address() +
start_offset);
Object** end_slot = reinterpret_cast<Object**>(object->address() +
end_offset);
StaticVisitor::VisitPointers(start_slot, end_slot);
}
};
template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
public:
static inline ReturnType Visit(Map* map, HeapObject* object) {
int object_size = BodyDescriptor::SizeOf(map, object);
IteratePointers(object, BodyDescriptor::kStartOffset, object_size);
return static_cast<ReturnType>(object_size);
}
template<int object_size>
static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
IteratePointers(object, BodyDescriptor::kStartOffset, object_size);
return static_cast<ReturnType>(object_size);
}
};
template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
public:
static inline ReturnType Visit(Map* map, HeapObject* object) {
IteratePointers(object,
BodyDescriptor::kStartOffset,
BodyDescriptor::kEndOffset);
return static_cast<ReturnType>(BodyDescriptor::kSize);
}
};
// Base class for visitors used for a linear new space iteration.
// IterateBody returns the size of the visited object.
// Certain types of objects (e.g. Code objects) are not handled
// by the dispatch table of this visitor because they cannot appear
// in the new space.
//
// This class is intended to be used in the following way:
//
// class SomeVisitor : public StaticNewSpaceVisitor<SomeVisitor> {
// ...
// }
//
// This is an example of the Curiously Recurring Template Pattern
// (see http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern).
// We use CRTP to guarantee aggressive compile-time optimizations (i.e.
// inlining and specialization of StaticVisitor::VisitPointers methods).
template<typename StaticVisitor>
class StaticNewSpaceVisitor : public StaticVisitorBase {
public:
static void Initialize() {
table_.Register(kVisitShortcutCandidate,
&FixedBodyVisitor<StaticVisitor,
ConsString::BodyDescriptor,
int>::Visit);
table_.Register(kVisitConsString,
&FixedBodyVisitor<StaticVisitor,
ConsString::BodyDescriptor,
int>::Visit);
table_.Register(kVisitFixedArray,
&FlexibleBodyVisitor<StaticVisitor,
FixedArray::BodyDescriptor,
int>::Visit);
table_.Register(kVisitByteArray, &VisitByteArray);
table_.Register(kVisitSharedFunctionInfo,
&FixedBodyVisitor<StaticVisitor,
SharedFunctionInfo::BodyDescriptor,
int>::Visit);
table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
table_.RegisterSpecializations<DataObjectVisitor,
kVisitDataObject,
kVisitDataObjectGeneric>();
table_.RegisterSpecializations<JSObjectVisitor,
kVisitJSObject,
kVisitJSObjectGeneric>();
table_.RegisterSpecializations<StructVisitor,
kVisitStruct,
kVisitStructGeneric>();
}
static inline int IterateBody(Map* map, HeapObject* obj) {
return table_.GetVisitor(map)(map, obj);
}
static inline void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(p);
}
private:
static inline int VisitByteArray(Map* map, HeapObject* object) {
return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
}
static inline int VisitSeqAsciiString(Map* map, HeapObject* object) {
return SeqAsciiString::cast(object)->
SeqAsciiStringSize(map->instance_type());
}
static inline int VisitSeqTwoByteString(Map* map, HeapObject* object) {
return SeqTwoByteString::cast(object)->
SeqTwoByteStringSize(map->instance_type());
}
class DataObjectVisitor {
public:
template<int object_size>
static inline int VisitSpecialized(Map* map, HeapObject* object) {
return object_size;
}
static inline int Visit(Map* map, HeapObject* object) {
return map->instance_size();
}
};
typedef FlexibleBodyVisitor<StaticVisitor,
StructBodyDescriptor,
int> StructVisitor;
typedef FlexibleBodyVisitor<StaticVisitor,
JSObject::BodyDescriptor,
int> JSObjectVisitor;
typedef int (*Callback)(Map* map, HeapObject* object);
static VisitorDispatchTable<Callback> table_;
};
template<typename StaticVisitor>
VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
StaticNewSpaceVisitor<StaticVisitor>::table_;
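The CRTP arrangement means StaticVisitor::VisitPointer is resolved at compile time, so the compiler can inline it into the iteration loop instead of issuing a virtual call per slot. A toy visitor built the same way (illustrative types, not the real new-space visitor):

#include <cstdio>

// The base walks a slot range and calls the derived class's static
// VisitPointer directly.
template <typename StaticVisitor>
struct StaticVisitorSketch {
  static void VisitPointers(int* start, int* end) {
    for (int* p = start; p < end; ++p) StaticVisitor::VisitPointer(p);
  }
};

struct CountingVisitor : StaticVisitorSketch<CountingVisitor> {
  static int count;
  static void VisitPointer(int* p) {
    if (*p != 0) ++count;  // e.g. "slot holds a live pointer"
  }
};

int CountingVisitor::count = 0;

int main() {
  int slots[] = {1, 0, 7, 3};
  CountingVisitor::VisitPointers(slots, slots + 4);
  std::printf("%d\n", CountingVisitor::count);  // 3
  return 0;
}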
void Code::CodeIterateBody(ObjectVisitor* v) {
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
// Use the relocation info pointer before it is visited by
// the heap compaction in the next statement.
RelocIterator it(this, mode_mask);
IteratePointers(v,
kRelocationInfoOffset,
kRelocationInfoOffset + kPointerSize);
for (; !it.done(); it.next()) {
it.rinfo()->Visit(v);
}
}
template<typename StaticVisitor>
void Code::CodeIterateBody() {
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
// Use the relocation info pointer before it is visited by
// the heap compaction in the next statement.
RelocIterator it(this, mode_mask);
StaticVisitor::VisitPointer(
reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
for (; !it.done(); it.next()) {
it.rinfo()->template Visit<StaticVisitor>();
}
}
} } // namespace v8::internal
#endif // V8_OBJECTS_ITERATION_H_

178
deps/v8/src/objects.cc

@ -33,6 +33,7 @@
#include "debug.h" #include "debug.h"
#include "execution.h" #include "execution.h"
#include "objects-inl.h" #include "objects-inl.h"
#include "objects-visiting.h"
#include "macro-assembler.h" #include "macro-assembler.h"
#include "scanner.h" #include "scanner.h"
#include "scopeinfo.h" #include "scopeinfo.h"
@ -1042,7 +1043,7 @@ int HeapObject::SlowSizeFromMap(Map* map) {
switch (instance_type) { switch (instance_type) {
case FIXED_ARRAY_TYPE: case FIXED_ARRAY_TYPE:
return reinterpret_cast<FixedArray*>(this)->FixedArraySize(); return FixedArray::BodyDescriptor::SizeOf(map, this);
case BYTE_ARRAY_TYPE: case BYTE_ARRAY_TYPE:
return reinterpret_cast<ByteArray*>(this)->ByteArraySize(); return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
case CODE_TYPE: case CODE_TYPE:
@ -1073,7 +1074,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case kSeqStringTag: case kSeqStringTag:
break; break;
case kConsStringTag: case kConsStringTag:
reinterpret_cast<ConsString*>(this)->ConsStringIterateBody(v); ConsString::BodyDescriptor::IterateBody(this, v);
break; break;
case kExternalStringTag: case kExternalStringTag:
if ((type & kStringEncodingMask) == kAsciiStringTag) { if ((type & kStringEncodingMask) == kAsciiStringTag) {
@ -1090,7 +1091,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
switch (type) { switch (type) {
case FIXED_ARRAY_TYPE: case FIXED_ARRAY_TYPE:
reinterpret_cast<FixedArray*>(this)->FixedArrayIterateBody(v); FixedArray::BodyDescriptor::IterateBody(this, object_size, v);
break; break;
case JS_OBJECT_TYPE: case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE: case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@ -1101,23 +1102,22 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case JS_GLOBAL_PROXY_TYPE: case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE: case JS_GLOBAL_OBJECT_TYPE:
case JS_BUILTINS_OBJECT_TYPE: case JS_BUILTINS_OBJECT_TYPE:
reinterpret_cast<JSObject*>(this)->JSObjectIterateBody(object_size, v); JSObject::BodyDescriptor::IterateBody(this, object_size, v);
break; break;
case ODDBALL_TYPE: case ODDBALL_TYPE:
reinterpret_cast<Oddball*>(this)->OddballIterateBody(v); Oddball::BodyDescriptor::IterateBody(this, v);
break; break;
case PROXY_TYPE: case PROXY_TYPE:
reinterpret_cast<Proxy*>(this)->ProxyIterateBody(v); reinterpret_cast<Proxy*>(this)->ProxyIterateBody(v);
break; break;
case MAP_TYPE: case MAP_TYPE:
reinterpret_cast<Map*>(this)->MapIterateBody(v); Map::BodyDescriptor::IterateBody(this, v);
break; break;
case CODE_TYPE: case CODE_TYPE:
reinterpret_cast<Code*>(this)->CodeIterateBody(v); reinterpret_cast<Code*>(this)->CodeIterateBody(v);
break; break;
case JS_GLOBAL_PROPERTY_CELL_TYPE: case JS_GLOBAL_PROPERTY_CELL_TYPE:
reinterpret_cast<JSGlobalPropertyCell*>(this) JSGlobalPropertyCell::BodyDescriptor::IterateBody(this, v);
->JSGlobalPropertyCellIterateBody(v);
break; break;
case HEAP_NUMBER_TYPE: case HEAP_NUMBER_TYPE:
case FILLER_TYPE: case FILLER_TYPE:
@ -1131,16 +1131,15 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE: case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
case EXTERNAL_FLOAT_ARRAY_TYPE: case EXTERNAL_FLOAT_ARRAY_TYPE:
break; break;
case SHARED_FUNCTION_INFO_TYPE: { case SHARED_FUNCTION_INFO_TYPE:
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(this); SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
shared->SharedFunctionInfoIterateBody(v);
break; break;
}
#define MAKE_STRUCT_CASE(NAME, Name, name) \ #define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: case NAME##_TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE) STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE #undef MAKE_STRUCT_CASE
IterateStructBody(object_size, v); StructBodyDescriptor::IterateBody(this, object_size, v);
break; break;
default: default:
PrintF("Unknown type: %d\n", type); PrintF("Unknown type: %d\n", type);
@ -1156,11 +1155,23 @@ void HeapObject::IterateStructBody(int object_size, ObjectVisitor* v) {
Object* HeapNumber::HeapNumberToBoolean() {
// NaN, +0, and -0 should return the false object
-switch (fpclassify(value())) {
-case FP_NAN: // fall through
-case FP_ZERO: return Heap::false_value();
-default: return Heap::true_value();
-}
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+union IeeeDoubleLittleEndianArchType u;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+union IeeeDoubleBigEndianArchType u;
+#endif
+u.d = value();
+if (u.bits.exp == 2047) {
+// Detect NaN for IEEE double precision floating point.
+if ((u.bits.man_low | u.bits.man_high) != 0)
+return Heap::false_value();
+}
+if (u.bits.exp == 0) {
+// Detect +0, and -0 for IEEE double precision floating point.
+if ((u.bits.man_low | u.bits.man_high) == 0)
+return Heap::false_value();
+}
+return Heap::true_value();
}
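The rewritten HeapNumberToBoolean classifies the double by its raw IEEE-754 fields instead of calling fpclassify: an all-ones exponent (2047) with a non-zero mantissa is a NaN, an all-zero exponent with a zero mantissa is +0 or -0, and everything else, including the infinities, maps to true. A standalone sketch of the same test, using memcpy rather than the IeeeDouble*ArchType unions:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Returns false for NaN, +0 and -0, true for every other double, by reading
// the IEEE-754 exponent (bits 52..62) and mantissa (bits 0..51) directly.
static bool DoubleToBoolean(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));            // avoids union type punning
  uint64_t exp = (bits >> 52) & 0x7ff;             // 11-bit exponent
  uint64_t mantissa = bits & 0xfffffffffffffULL;   // 52-bit mantissa
  if (exp == 2047 && mantissa != 0) return false;  // NaN (Infinity stays true)
  if (exp == 0 && mantissa == 0) return false;     // +0 or -0
  return true;
}

int main() {
  std::printf("%d %d %d %d\n",
              DoubleToBoolean(0.0), DoubleToBoolean(-0.0),
              DoubleToBoolean(std::nan("")), DoubleToBoolean(1.5));  // 0 0 0 1
  return 0;
}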
@ -1209,12 +1220,6 @@ String* JSObject::constructor_name() {
} }
void JSObject::JSObjectIterateBody(int object_size, ObjectVisitor* v) {
// Iterate over all fields in the body. Assumes all are Object*.
IteratePointers(v, kPropertiesOffset, object_size);
}
Object* JSObject::AddFastPropertyUsingMap(Map* new_map, Object* JSObject::AddFastPropertyUsingMap(Map* new_map,
String* name, String* name,
Object* value) { Object* value) {
@ -1337,7 +1342,7 @@ Object* JSObject::AddConstantFunctionProperty(String* name,
if (attributes != NONE) { if (attributes != NONE) {
return function; return function;
} }
ConstTransitionDescriptor mark(name); ConstTransitionDescriptor mark(name, Map::cast(new_map));
new_descriptors = new_descriptors =
old_map->instance_descriptors()->CopyInsert(&mark, KEEP_TRANSITIONS); old_map->instance_descriptors()->CopyInsert(&mark, KEEP_TRANSITIONS);
if (new_descriptors->IsFailure()) { if (new_descriptors->IsFailure()) {
@ -1695,11 +1700,7 @@ bool JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
void JSObject::LookupInDescriptor(String* name, LookupResult* result) { void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
DescriptorArray* descriptors = map()->instance_descriptors(); DescriptorArray* descriptors = map()->instance_descriptors();
int number = DescriptorLookupCache::Lookup(descriptors, name); int number = descriptors->SearchWithCache(name);
if (number == DescriptorLookupCache::kAbsent) {
number = descriptors->Search(name);
DescriptorLookupCache::Update(descriptors, name, number);
}
if (number != DescriptorArray::kNotFound) { if (number != DescriptorArray::kNotFound) {
result->DescriptorResult(this, descriptors->GetDetails(number), number); result->DescriptorResult(this, descriptors->GetDetails(number), number);
} else { } else {
@ -1817,8 +1818,10 @@ Object* JSObject::SetPropertyWithFailedAccessCheck(LookupResult* result,
}
}
+HandleScope scope;
+Handle<Object> value_handle(value);
Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
-return value;
+return *value_handle;
}
@ -1896,10 +1899,25 @@ Object* JSObject::SetProperty(LookupResult* result,
result->holder());
case INTERCEPTOR:
return SetPropertyWithInterceptor(name, value, attributes);
-case CONSTANT_TRANSITION:
-// Replace with a MAP_TRANSITION to a new map with a FIELD, even
-// if the value is a function.
+case CONSTANT_TRANSITION: {
+// If the same constant function is being added we can simply
+// transition to the target map.
+Map* target_map = result->GetTransitionMap();
+DescriptorArray* target_descriptors = target_map->instance_descriptors();
+int number = target_descriptors->SearchWithCache(name);
+ASSERT(number != DescriptorArray::kNotFound);
+ASSERT(target_descriptors->GetType(number) == CONSTANT_FUNCTION);
+JSFunction* function =
+JSFunction::cast(target_descriptors->GetValue(number));
+ASSERT(!Heap::InNewSpace(function));
+if (value == function) {
+set_map(target_map);
+return value;
+}
+// Otherwise, replace with a MAP_TRANSITION to a new map with a
+// FIELD, even if the value is a constant function.
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
+}
case NULL_DESCRIPTOR:
return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
default:
@ -2190,8 +2208,7 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
int new_instance_size = map()->instance_size() - instance_size_delta; int new_instance_size = map()->instance_size() - instance_size_delta;
new_map->set_inobject_properties(0); new_map->set_inobject_properties(0);
new_map->set_instance_size(new_instance_size); new_map->set_instance_size(new_instance_size);
new_map->set_scavenger(Heap::GetScavenger(new_map->instance_type(), new_map->set_visitor_id(StaticVisitorBase::GetVisitorId(new_map));
new_map->instance_size()));
Heap::CreateFillerObjectAt(this->address() + new_instance_size, Heap::CreateFillerObjectAt(this->address() + new_instance_size,
instance_size_delta); instance_size_delta);
} }
@ -3407,11 +3424,6 @@ void CodeCacheHashTable::RemoveByIndex(int index) {
} }
void FixedArray::FixedArrayIterateBody(ObjectVisitor* v) {
IteratePointers(v, kHeaderSize, kHeaderSize + length() * kPointerSize);
}
static bool HasKey(FixedArray* array, Object* key) { static bool HasKey(FixedArray* array, Object* key) {
int len0 = array->length(); int len0 = array->length();
for (int i = 0; i < len0; i++) { for (int i = 0; i < len0; i++) {
@ -4501,16 +4513,6 @@ void ConsString::ConsStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
} }
void ConsString::ConsStringIterateBody(ObjectVisitor* v) {
IteratePointers(v, kFirstOffset, kSecondOffset + kPointerSize);
}
void JSGlobalPropertyCell::JSGlobalPropertyCellIterateBody(ObjectVisitor* v) {
IteratePointers(v, kValueOffset, kValueOffset + kPointerSize);
}
uint16_t ConsString::ConsStringGet(int index) { uint16_t ConsString::ConsStringGet(int index) {
ASSERT(index >= 0 && index < this->length()); ASSERT(index >= 0 && index < this->length());
@ -4614,24 +4616,6 @@ void String::WriteToFlat(String* src,
} }
#define FIELD_ADDR(p, offset) \
(reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
void ExternalAsciiString::ExternalAsciiStringIterateBody(ObjectVisitor* v) {
typedef v8::String::ExternalAsciiStringResource Resource;
v->VisitExternalAsciiString(
reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
void ExternalTwoByteString::ExternalTwoByteStringIterateBody(ObjectVisitor* v) {
typedef v8::String::ExternalStringResource Resource;
v->VisitExternalTwoByteString(
reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
#undef FIELD_ADDR
template <typename IteratorA, typename IteratorB> template <typename IteratorA, typename IteratorB>
static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) { static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) {
// General slow case check. We know that the ia and ib iterators // General slow case check. We know that the ia and ib iterators
@ -4977,7 +4961,8 @@ void String::PrintOn(FILE* file) {
void Map::CreateBackPointers() { void Map::CreateBackPointers() {
DescriptorArray* descriptors = instance_descriptors(); DescriptorArray* descriptors = instance_descriptors();
for (int i = 0; i < descriptors->number_of_descriptors(); i++) { for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
if (descriptors->GetType(i) == MAP_TRANSITION) { if (descriptors->GetType(i) == MAP_TRANSITION ||
descriptors->GetType(i) == CONSTANT_TRANSITION) {
// Get target. // Get target.
Map* target = Map::cast(descriptors->GetValue(i)); Map* target = Map::cast(descriptors->GetValue(i));
#ifdef DEBUG #ifdef DEBUG
@ -5018,7 +5003,8 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) {
// map is not reached again by following a back pointer from a // map is not reached again by following a back pointer from a
// non-live object. // non-live object.
PropertyDetails details(Smi::cast(contents->get(i + 1))); PropertyDetails details(Smi::cast(contents->get(i + 1)));
if (details.type() == MAP_TRANSITION) { if (details.type() == MAP_TRANSITION ||
details.type() == CONSTANT_TRANSITION) {
Map* target = reinterpret_cast<Map*>(contents->get(i)); Map* target = reinterpret_cast<Map*>(contents->get(i));
ASSERT(target->IsHeapObject()); ASSERT(target->IsHeapObject());
if (!target->IsMarked()) { if (!target->IsMarked()) {
@ -5035,12 +5021,6 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) {
} }
void Map::MapIterateBody(ObjectVisitor* v) {
// Assumes all Object* members are contiguously allocated!
IteratePointers(v, kPointerFieldsBeginOffset, kPointerFieldsEndOffset);
}
Object* JSFunction::SetInstancePrototype(Object* value) { Object* JSFunction::SetInstancePrototype(Object* value) {
ASSERT(value->IsJSObject()); ASSERT(value->IsJSObject());
@ -5104,12 +5084,6 @@ Context* JSFunction::GlobalContextFromLiterals(FixedArray* literals) {
} }
void Oddball::OddballIterateBody(ObjectVisitor* v) {
// Assumes all Object* members are contiguously allocated!
IteratePointers(v, kToStringOffset, kToNumberOffset + kPointerSize);
}
Object* Oddball::Initialize(const char* to_string, Object* to_number) { Object* Oddball::Initialize(const char* to_string, Object* to_number) {
Object* symbol = Heap::LookupAsciiSymbol(to_string); Object* symbol = Heap::LookupAsciiSymbol(to_string);
if (symbol->IsFailure()) return symbol; if (symbol->IsFailure()) return symbol;
@ -5282,13 +5256,6 @@ void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
} }
void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
IteratePointers(v,
kNameOffset,
kThisPropertyAssignmentsOffset + kPointerSize);
}
void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) { void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@ -5310,28 +5277,6 @@ void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
} }
void Code::CodeIterateBody(ObjectVisitor* v) {
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
// Use the relocation info pointer before it is visited by
// the heap compaction in the next statement.
RelocIterator it(this, mode_mask);
IteratePointers(v,
kRelocationInfoOffset,
kRelocationInfoOffset + kPointerSize);
for (; !it.done(); it.next()) {
it.rinfo()->Visit(v);
}
}
void Code::Relocate(intptr_t delta) { void Code::Relocate(intptr_t delta) {
for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) { for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
it.rinfo()->apply(delta); it.rinfo()->apply(delta);
@ -5826,13 +5771,16 @@ bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
v8::IndexedPropertyQuery query =
v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
LOG(ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
-v8::Handle<v8::Boolean> result;
+v8::Handle<v8::Integer> result;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
result = query(index, info);
}
-if (!result.IsEmpty()) return result->IsTrue();
+if (!result.IsEmpty()) {
+ASSERT(result->IsInt32());
+return true;  // absence of property is signaled by empty handle.
+}
} else if (!interceptor->getter()->IsUndefined()) {
v8::IndexedPropertyGetter getter =
v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
@ -6151,8 +6099,10 @@ Object* JSObject::SetElement(uint32_t index, Object* value) {
// Check access rights if needed. // Check access rights if needed.
if (IsAccessCheckNeeded() && if (IsAccessCheckNeeded() &&
!Top::MayIndexedAccess(this, index, v8::ACCESS_SET)) { !Top::MayIndexedAccess(this, index, v8::ACCESS_SET)) {
HandleScope scope;
Handle<Object> value_handle(value);
Top::ReportFailedAccessCheck(this, v8::ACCESS_SET); Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
return value; return *value_handle;
} }
if (IsJSGlobalProxy()) { if (IsJSGlobalProxy()) {

140
deps/v8/src/objects.h

@ -1106,6 +1106,51 @@ class HeapObject: public Object {
}; };
#define SLOT_ADDR(obj, offset) \
reinterpret_cast<Object**>((obj)->address() + offset)
// This class describes a body of an object of a fixed size
// in which all pointer fields are located in the [start_offset, end_offset)
// interval.
template<int start_offset, int end_offset, int size>
class FixedBodyDescriptor {
public:
static const int kStartOffset = start_offset;
static const int kEndOffset = end_offset;
static const int kSize = size;
static inline void IterateBody(HeapObject* obj, ObjectVisitor* v);
template<typename StaticVisitor>
static inline void IterateBody(HeapObject* obj) {
StaticVisitor::VisitPointers(SLOT_ADDR(obj, start_offset),
SLOT_ADDR(obj, end_offset));
}
};
// This class describes a body of an object of a variable size
// in which all pointer fields are located in the [start_offset, object_size)
// interval.
template<int start_offset>
class FlexibleBodyDescriptor {
public:
static const int kStartOffset = start_offset;
static inline void IterateBody(HeapObject* obj,
int object_size,
ObjectVisitor* v);
template<typename StaticVisitor>
static inline void IterateBody(HeapObject* obj, int object_size) {
StaticVisitor::VisitPointers(SLOT_ADDR(obj, start_offset),
SLOT_ADDR(obj, object_size));
}
};
#undef SLOT_ADDR
// The HeapNumber class describes heap allocated numbers that cannot be // The HeapNumber class describes heap allocated numbers that cannot be
// represented in a Smi (small integer) // represented in a Smi (small integer)
class HeapNumber: public HeapObject { class HeapNumber: public HeapObject {
@ -1522,7 +1567,6 @@ class JSObject: public HeapObject {
// Dispatched behavior. // Dispatched behavior.
void JSObjectIterateBody(int object_size, ObjectVisitor* v);
void JSObjectShortPrint(StringStream* accumulator); void JSObjectShortPrint(StringStream* accumulator);
#ifdef DEBUG #ifdef DEBUG
void JSObjectPrint(); void JSObjectPrint();
@ -1578,6 +1622,11 @@ class JSObject: public HeapObject {
STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize); STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize);
class BodyDescriptor : public FlexibleBodyDescriptor<kPropertiesOffset> {
public:
static inline int SizeOf(Map* map, HeapObject* object);
};
private: private:
Object* GetElementWithCallback(Object* receiver, Object* GetElementWithCallback(Object* receiver,
Object* structure, Object* structure,
@ -1692,8 +1741,6 @@ class FixedArray: public HeapObject {
static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize; static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
// Dispatched behavior. // Dispatched behavior.
int FixedArraySize() { return SizeFor(length()); }
void FixedArrayIterateBody(ObjectVisitor* v);
#ifdef DEBUG #ifdef DEBUG
void FixedArrayPrint(); void FixedArrayPrint();
void FixedArrayVerify(); void FixedArrayVerify();
@ -1711,6 +1758,13 @@ class FixedArray: public HeapObject {
// object, the prefix of this array is sorted. // object, the prefix of this array is sorted.
void SortPairs(FixedArray* numbers, uint32_t len); void SortPairs(FixedArray* numbers, uint32_t len);
class BodyDescriptor : public FlexibleBodyDescriptor<kHeaderSize> {
public:
static inline int SizeOf(Map* map, HeapObject* object) {
return SizeFor(reinterpret_cast<FixedArray*>(object)->length());
}
};
protected: protected:
// Set operation on FixedArray without using write barriers. Can // Set operation on FixedArray without using write barriers. Can
// only be used for storing old space objects or smis. // only be used for storing old space objects or smis.
@ -1811,6 +1865,10 @@ class DescriptorArray: public FixedArray {
// Search the instance descriptors for given name. // Search the instance descriptors for given name.
inline int Search(String* name); inline int Search(String* name);
// As the above, but uses DescriptorLookupCache and updates it when
// necessary.
inline int SearchWithCache(String* name);
// Tells whether the name is present in the array.
bool Contains(String* name) { return kNotFound != Search(name); } bool Contains(String* name) { return kNotFound != Search(name); }
@ -2426,7 +2484,9 @@ class ByteArray: public HeapObject {
static inline ByteArray* cast(Object* obj); static inline ByteArray* cast(Object* obj);
// Dispatched behavior. // Dispatched behavior.
-int ByteArraySize() { return SizeFor(length()); }
+inline int ByteArraySize() {
+return SizeFor(this->length());
+}
#ifdef DEBUG #ifdef DEBUG
void ByteArrayPrint(); void ByteArrayPrint();
void ByteArrayVerify(); void ByteArrayVerify();
@ -2847,7 +2907,10 @@ class Code: public HeapObject {
// Dispatched behavior. // Dispatched behavior.
int CodeSize() { return SizeFor(body_size()); } int CodeSize() { return SizeFor(body_size()); }
-void CodeIterateBody(ObjectVisitor* v);
+inline void CodeIterateBody(ObjectVisitor* v);
+template<typename StaticVisitor>
+inline void CodeIterateBody();
#ifdef DEBUG #ifdef DEBUG
void CodePrint(); void CodePrint();
void CodeVerify(); void CodeVerify();
@ -2893,7 +2956,6 @@ class Code: public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(Code); DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
}; };
typedef void (*Scavenger)(Map* map, HeapObject** slot, HeapObject* object);
// All heap objects have a Map that describes their structure. // All heap objects have a Map that describes their structure.
// A Map contains information about: // A Map contains information about:
@ -3089,18 +3151,13 @@ class Map: public HeapObject {
void ClearNonLiveTransitions(Object* real_prototype); void ClearNonLiveTransitions(Object* real_prototype);
// Dispatched behavior. // Dispatched behavior.
void MapIterateBody(ObjectVisitor* v);
#ifdef DEBUG #ifdef DEBUG
void MapPrint(); void MapPrint();
void MapVerify(); void MapVerify();
#endif #endif
-inline Scavenger scavenger();
-inline void set_scavenger(Scavenger callback);
-inline void Scavenge(HeapObject** slot, HeapObject* obj) {
-scavenger()(this, slot, obj);
-}
+inline int visitor_id();
+inline void set_visitor_id(int visitor_id);
static const int kMaxPreAllocatedPropertyFields = 255; static const int kMaxPreAllocatedPropertyFields = 255;
@ -3154,12 +3211,17 @@ class Map: public HeapObject {
static const int kIsExtensible = 0; static const int kIsExtensible = 0;
static const int kFunctionWithPrototype = 1; static const int kFunctionWithPrototype = 1;
static const int kHasFastElements = 2; static const int kHasFastElements = 2;
static const int kStringWrapperSafeForDefaultValueOf = 3;
// Layout of the default cache. It holds alternating name and code objects. // Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2; static const int kCodeCacheEntrySize = 2;
static const int kCodeCacheEntryNameOffset = 0; static const int kCodeCacheEntryNameOffset = 0;
static const int kCodeCacheEntryCodeOffset = 1; static const int kCodeCacheEntryCodeOffset = 1;
typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
kPointerFieldsEndOffset,
kSize> BodyDescriptor;
private: private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Map); DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
}; };
@ -3414,7 +3476,6 @@ class SharedFunctionInfo: public HeapObject {
int CalculateInObjectProperties(); int CalculateInObjectProperties();
// Dispatched behavior. // Dispatched behavior.
void SharedFunctionInfoIterateBody(ObjectVisitor* v);
// Set max_length to -1 for unlimited length. // Set max_length to -1 for unlimited length.
void SourceCodePrint(StringStream* accumulator, int max_length); void SourceCodePrint(StringStream* accumulator, int max_length);
#ifdef DEBUG #ifdef DEBUG
@ -3503,6 +3564,10 @@ class SharedFunctionInfo: public HeapObject {
#endif #endif
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize); static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
typedef FixedBodyDescriptor<kNameOffset,
kThisPropertyAssignmentsOffset + kPointerSize,
kSize> BodyDescriptor;
private: private:
// Bit positions in start_position_and_type. // Bit positions in start_position_and_type.
// The source code start position is in the 30 most significant bits of // The source code start position is in the 30 most significant bits of
@ -3608,7 +3673,9 @@ class JSFunction: public JSObject {
static Context* GlobalContextFromLiterals(FixedArray* literals); static Context* GlobalContextFromLiterals(FixedArray* literals);
// Layout descriptors. // Layout descriptors.
-static const int kPrototypeOrInitialMapOffset = JSObject::kHeaderSize;
+static const int kCodeOffset = JSObject::kHeaderSize;
+static const int kPrototypeOrInitialMapOffset =
+kCodeOffset + kPointerSize;
static const int kSharedFunctionInfoOffset =
kPrototypeOrInitialMapOffset + kPointerSize;
static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
@ -4551,11 +4618,6 @@ class ConsString: public String {
// Casting. // Casting.
static inline ConsString* cast(Object* obj); static inline ConsString* cast(Object* obj);
// Garbage collection support. This method is called during garbage
// collection to iterate through the heap pointers in the body of
// the ConsString.
void ConsStringIterateBody(ObjectVisitor* v);
// Layout description. // Layout description.
static const int kFirstOffset = POINTER_SIZE_ALIGN(String::kSize); static const int kFirstOffset = POINTER_SIZE_ALIGN(String::kSize);
static const int kSecondOffset = kFirstOffset + kPointerSize; static const int kSecondOffset = kFirstOffset + kPointerSize;
@ -4572,6 +4634,9 @@ class ConsString: public String {
// Minimum length for a cons string. // Minimum length for a cons string.
static const int kMinLength = 13; static const int kMinLength = 13;
typedef FixedBodyDescriptor<kFirstOffset, kSecondOffset + kPointerSize, kSize>
BodyDescriptor;
private: private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString); DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
}; };
@ -4621,7 +4686,10 @@ class ExternalAsciiString: public ExternalString {
static inline ExternalAsciiString* cast(Object* obj); static inline ExternalAsciiString* cast(Object* obj);
// Garbage collection support. // Garbage collection support.
void ExternalAsciiStringIterateBody(ObjectVisitor* v); inline void ExternalAsciiStringIterateBody(ObjectVisitor* v);
template<typename StaticVisitor>
inline void ExternalAsciiStringIterateBody();
// Support for StringInputBuffer. // Support for StringInputBuffer.
const unibrow::byte* ExternalAsciiStringReadBlock(unsigned* remaining, const unibrow::byte* ExternalAsciiStringReadBlock(unsigned* remaining,
@ -4658,7 +4726,11 @@ class ExternalTwoByteString: public ExternalString {
static inline ExternalTwoByteString* cast(Object* obj); static inline ExternalTwoByteString* cast(Object* obj);
// Garbage collection support. // Garbage collection support.
void ExternalTwoByteStringIterateBody(ObjectVisitor* v); inline void ExternalTwoByteStringIterateBody(ObjectVisitor* v);
template<typename StaticVisitor>
inline void ExternalTwoByteStringIterateBody();
// Support for StringInputBuffer. // Support for StringInputBuffer.
void ExternalTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer, void ExternalTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
@ -4769,7 +4841,6 @@ class Oddball: public HeapObject {
static inline Oddball* cast(Object* obj); static inline Oddball* cast(Object* obj);
// Dispatched behavior. // Dispatched behavior.
void OddballIterateBody(ObjectVisitor* v);
#ifdef DEBUG #ifdef DEBUG
void OddballVerify(); void OddballVerify();
#endif #endif
@ -4782,6 +4853,10 @@ class Oddball: public HeapObject {
static const int kToNumberOffset = kToStringOffset + kPointerSize; static const int kToNumberOffset = kToStringOffset + kPointerSize;
static const int kSize = kToNumberOffset + kPointerSize; static const int kSize = kToNumberOffset + kPointerSize;
typedef FixedBodyDescriptor<kToStringOffset,
kToNumberOffset + kPointerSize,
kSize> BodyDescriptor;
private: private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Oddball); DISALLOW_IMPLICIT_CONSTRUCTORS(Oddball);
}; };
@ -4795,8 +4870,6 @@ class JSGlobalPropertyCell: public HeapObject {
// Casting. // Casting.
static inline JSGlobalPropertyCell* cast(Object* obj); static inline JSGlobalPropertyCell* cast(Object* obj);
// Dispatched behavior.
void JSGlobalPropertyCellIterateBody(ObjectVisitor* v);
#ifdef DEBUG #ifdef DEBUG
void JSGlobalPropertyCellVerify(); void JSGlobalPropertyCellVerify();
void JSGlobalPropertyCellPrint(); void JSGlobalPropertyCellPrint();
@ -4806,6 +4879,10 @@ class JSGlobalPropertyCell: public HeapObject {
static const int kValueOffset = HeapObject::kHeaderSize; static const int kValueOffset = HeapObject::kHeaderSize;
static const int kSize = kValueOffset + kPointerSize; static const int kSize = kValueOffset + kPointerSize;
typedef FixedBodyDescriptor<kValueOffset,
kValueOffset + kPointerSize,
kSize> BodyDescriptor;
private: private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell); DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
}; };
@ -4826,6 +4903,10 @@ class Proxy: public HeapObject {
// Dispatched behavior. // Dispatched behavior.
inline void ProxyIterateBody(ObjectVisitor* v); inline void ProxyIterateBody(ObjectVisitor* v);
template<typename StaticVisitor>
inline void ProxyIterateBody();
#ifdef DEBUG #ifdef DEBUG
void ProxyPrint(); void ProxyPrint();
void ProxyVerify(); void ProxyVerify();
@ -5343,6 +5424,15 @@ class ObjectVisitor BASE_EMBEDDED {
}; };
class StructBodyDescriptor : public
FlexibleBodyDescriptor<HeapObject::kHeaderSize> {
public:
static inline int SizeOf(Map* map, HeapObject* object) {
return map->instance_size();
}
};
// BooleanBit is a helper class for setting and getting a bit in an // BooleanBit is a helper class for setting and getting a bit in an
// integer or Smi. // integer or Smi.
class BooleanBit : public AllStatic { class BooleanBit : public AllStatic {

9
deps/v8/src/parser.cc

@ -3587,10 +3587,8 @@ ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
// { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
// We have already read the "get" or "set" keyword.
Token::Value next = Next();
-if (next == Token::IDENTIFIER ||
-next == Token::STRING ||
-next == Token::NUMBER ||
-Token::IsKeyword(next)) {
+// TODO(820): Allow NUMBER and STRING as well (and handle array indices).
+if (next == Token::IDENTIFIER || Token::IsKeyword(next)) {
Handle<String> name =
factory()->LookupSymbol(scanner_.literal_string(),
scanner_.literal_length());
@ -3652,8 +3650,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
factory()->LookupSymbol(scanner_.literal_string(),
scanner_.literal_length());
uint32_t index;
-if (!string.is_null() &&
-string->AsArrayIndex(&index)) {
+if (!string.is_null() && string->AsArrayIndex(&index)) {
key = NewNumberLiteral(index);
break;
}

5
deps/v8/src/platform-linux.cc

@ -290,9 +290,10 @@ void OS::Abort() {
void OS::DebugBreak() {
// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
// which is the architecture of generated code).
-#if (defined(__arm__) || defined(__thumb__)) && \
-defined(CAN_USE_ARMV5_INSTRUCTIONS)
+#if (defined(__arm__) || defined(__thumb__))
+# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
asm("bkpt 0");
+# endif
#elif defined(__mips__)
asm("break");
#else

6
deps/v8/src/platform-nullos.cc

@ -100,6 +100,12 @@ double OS::DaylightSavingsOffset(double time) {
} }
int OS::GetLastError() {
UNIMPLEMENTED();
return 0;
}
// Returns the local time offset in milliseconds east of UTC without // Returns the local time offset in milliseconds east of UTC without
// taking daylight savings time into account. // taking daylight savings time into account.
double OS::LocalTimeOffset() { double OS::LocalTimeOffset() {

7
deps/v8/src/platform-posix.cc

@ -108,6 +108,11 @@ double OS::DaylightSavingsOffset(double time) {
} }
int OS::GetLastError() {
return errno;
}
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// POSIX stdio support. // POSIX stdio support.
// //
@ -238,7 +243,7 @@ bool POSIXSocket::Bind(const int port) {
addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
addr.sin_port = htons(port);
int status = bind(socket_,
-reinterpret_cast<struct sockaddr *>(&addr),
+BitCast<struct sockaddr *>(&addr),
sizeof(addr));
return status == 0;
}
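BitCast is V8's memcpy-based cast helper (in the spirit of what later became std::bit_cast); routing the sockaddr pointer conversion through it presumably sidesteps the type-punning/strict-aliasing warnings newer compilers emit for the bare reinterpret_cast. A minimal sketch of such a helper, with stand-in structs rather than the real socket types:

#include <cstdio>
#include <cstring>

// Reinterprets the object representation via memcpy instead of dereferencing
// a type-punned pointer. Sizes must match exactly.
template <class Dest, class Source>
inline Dest BitCastSketch(const Source& source) {
  static_assert(sizeof(Dest) == sizeof(Source), "sizes must match");
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));
  return dest;
}

struct GenericAddr { char data[16]; };  // stand-in for struct sockaddr
struct InetAddr { unsigned short family; unsigned short port; char pad[12]; };

int main() {
  InetAddr addr = {2, 8080, {0}};
  // Converts the pointer value itself, as the bind() call site above does.
  GenericAddr* generic = BitCastSketch<GenericAddr*>(&addr);
  std::printf("%p %p\n", static_cast<void*>(&addr), static_cast<void*>(generic));
  return 0;
}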

5
deps/v8/src/platform-win32.cc

@ -651,6 +651,11 @@ double OS::DaylightSavingsOffset(double time) {
} }
int OS::GetLastError() {
return ::GetLastError();
}
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Win32 console output. // Win32 console output.
// //

3
deps/v8/src/platform.h

@ -165,6 +165,9 @@ class OS {
// Returns the daylight savings offset for the given time. // Returns the daylight savings offset for the given time.
static double DaylightSavingsOffset(double time); static double DaylightSavingsOffset(double time);
// Returns last OS error.
static int GetLastError();
static FILE* FOpen(const char* path, const char* mode); static FILE* FOpen(const char* path, const char* mode);
// Log file open mode is platform-dependent due to line ends issues. // Log file open mode is platform-dependent due to line ends issues.

580
deps/v8/src/platform.h.orig

@ -0,0 +1,580 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This module contains the platform-specific code. This makes the rest of the
// code less dependent on operating system, compilers and runtime libraries.
// This module does specifically not deal with differences between different
// processor architecture.
// The platform classes have the same definition for all platforms. The
// implementation for a particular platform is put in platform_<os>.cc.
// The build system then uses the implementation for the target platform.
//
// This design has been chosen because it is simple and fast. Alternatively,
// the platform dependent classes could have been implemented using abstract
// superclasses with virtual methods and having specializations for each
// platform. This design was rejected because it was more complicated and
// slower. It would require factory methods for selecting the right
// implementation and the overhead of virtual methods for performance-
// sensitive operations like mutex locking/unlocking.
#ifndef V8_PLATFORM_H_
#define V8_PLATFORM_H_
#define V8_INFINITY INFINITY
// Windows specific stuff.
#ifdef WIN32
// Microsoft Visual C++ specific stuff.
#ifdef _MSC_VER
enum {
FP_NAN,
FP_INFINITE,
FP_ZERO,
FP_SUBNORMAL,
FP_NORMAL
};
#undef V8_INFINITY
#define V8_INFINITY HUGE_VAL
namespace v8 {
namespace internal {
int isfinite(double x);
} }
int isnan(double x);
int isinf(double x);
int isless(double x, double y);
int isgreater(double x, double y);
int fpclassify(double x);
int signbit(double x);
int strncasecmp(const char* s1, const char* s2, int n);
#endif // _MSC_VER
// Random is missing on both Visual Studio and MinGW.
int random();
#endif // WIN32
#ifdef __sun
# ifndef signbit
int signbit(double x);
# endif
#endif
// GCC specific stuff
#ifdef __GNUC__
// Needed for va_list on at least MinGW and Android.
#include <stdarg.h>
#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
// Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
// warning flag and certain versions of GCC due to a bug:
// http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11931
// For now, we use the more involved template-based version from <limits>, but
// only when compiling with GCC versions affected by the bug (2.96.x - 4.0.x)
// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro
#if __GNUC_VERSION__ >= 29600 && __GNUC_VERSION__ < 40100
#include <limits>
#undef V8_INFINITY
#define V8_INFINITY std::numeric_limits<double>::infinity()
#endif
#endif // __GNUC__
namespace v8 {
namespace internal {
// Use AtomicWord for a machine-sized pointer. It is assumed that
// reads and writes of naturally aligned values of this type are atomic.
typedef intptr_t AtomicWord;
class Semaphore;
double ceiling(double x);
double modulo(double x, double y);
// Forward declarations.
class Socket;
// ----------------------------------------------------------------------------
// OS
//
// This class has static methods for the different platform specific
// functions. Add methods here to cope with differences between the
// supported platforms.
class OS {
public:
// Initializes the platform OS support. Called once at VM startup.
static void Setup();
// Returns the accumulated user time for thread. This routine
// can be used for profiling. The implementation should
// strive for high-precision timer resolution, preferably
// micro-second resolution.
static int GetUserTime(uint32_t* secs, uint32_t* usecs);
// Get a tick counter normalized to one tick per microsecond.
// Used for calculating time intervals.
static int64_t Ticks();
// Returns current time as the number of milliseconds since
// 00:00:00 UTC, January 1, 1970.
static double TimeCurrentMillis();
// Returns a string identifying the current time zone. The
// timestamp is used for determining if DST is in effect.
static const char* LocalTimezone(double time);
// Returns the local time offset in milliseconds east of UTC without
// taking daylight savings time into account.
static double LocalTimeOffset();
// Returns the daylight savings offset for the given time.
static double DaylightSavingsOffset(double time);
// Returns last OS error.
static int GetLastError();
static FILE* FOpen(const char* path, const char* mode);
// Log file open mode is platform-dependent due to line ends issues.
static const char* LogFileOpenMode;
// Print output to console. This is mostly used for debugging output.
// On platforms that have standard terminal output, the output
// should go to stdout.
static void Print(const char* format, ...);
static void VPrint(const char* format, va_list args);
// Print error output to console. This is mostly used for error message
// output. On platforms that have standard terminal output, the output
// should go to stderr.
static void PrintError(const char* format, ...);
static void VPrintError(const char* format, va_list args);
// Allocate/Free memory used by JS heap. Pages are readable/writable, but
// they are not guaranteed to be executable unless 'executable' is true.
// Returns the address of allocated memory, or NULL if failed.
static void* Allocate(const size_t requested,
size_t* allocated,
bool is_executable);
static void Free(void* address, const size_t size);
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect a block of memory by marking it read-only/writable.
static void Protect(void* address, size_t size);
static void Unprotect(void* address, size_t size, bool is_executable);
#endif
// Returns an indication of whether a pointer is in a space that
// has been allocated by Allocate(). This method may conservatively
// always return false, but giving more accurate information may
// improve the robustness of the stack dump code in the presence of
// heap corruption.
static bool IsOutsideAllocatedSpace(void* pointer);
// Sleep for a number of milliseconds.
static void Sleep(const int milliseconds);
// Abort the current process.
static void Abort();
// Debug break.
static void DebugBreak();
// Walk the stack.
static const int kStackWalkError = -1;
static const int kStackWalkMaxNameLen = 256;
static const int kStackWalkMaxTextLen = 256;
struct StackFrame {
void* address;
char text[kStackWalkMaxTextLen];
};
static int StackWalk(Vector<StackFrame> frames);
// Factory method for creating a platform-dependent Mutex.
// Please use delete to reclaim the storage for the returned Mutex.
static Mutex* CreateMutex();
// Factory method for creating a platform-dependent Semaphore.
// Please use delete to reclaim the storage for the returned Semaphore.
static Semaphore* CreateSemaphore(int count);
// Factory method for creating a platform-dependent Socket.
// Please use delete to reclaim the storage for the returned Socket.
static Socket* CreateSocket();
class MemoryMappedFile {
public:
static MemoryMappedFile* create(const char* name, int size, void* initial);
virtual ~MemoryMappedFile() { }
virtual void* memory() = 0;
};
// Safe formatting print. Ensures that str is always null-terminated.
// Returns the number of chars written, or -1 if output was truncated.
static int SNPrintF(Vector<char> str, const char* format, ...);
static int VSNPrintF(Vector<char> str,
const char* format,
va_list args);
static char* StrChr(char* str, int c);
static void StrNCpy(Vector<char> dest, const char* src, size_t n);
// Support for the profiler. Can do nothing, in which case ticks
// occurring in shared libraries will not be properly accounted for.
static void LogSharedLibraryAddresses();
// The return value indicates the CPU features we are sure of because of the
// OS. For example, Mac OS X doesn't run on any x86 CPUs that lack SSE2
// instructions.
// This is a little messy because the interpretation depends on the
// combination of CPU and OS. The bits in the answer correspond to the bit
// positions indicated by the members of the CpuFeature enum from globals.h.
static uint64_t CpuFeaturesImpliedByPlatform();
// Returns the double constant NAN
static double nan_value();
// Support runtime detection of VFP3 on ARM CPUs.
static bool ArmCpuHasFeature(CpuFeature feature);
// Returns the activation frame alignment constraint or zero if
// the platform doesn't care. Guaranteed to be a power of two.
static int ActivationFrameAlignment();
static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
private:
static const int msPerSecond = 1000;
DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
};
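
// A minimal usage sketch (illustration only, not part of the original header):
// formatting into a fixed-size buffer with OS::SNPrintF, which always
// null-terminates and returns -1 on truncation. The helper name
// FormatSourceLocation is hypothetical; Vector comes from utils.h.
static inline int FormatSourceLocation(char* buffer, int buffer_size,
                                       const char* file, int line) {
  Vector<char> dest(buffer, buffer_size);
  return OS::SNPrintF(dest, "%s:%d", file, line);
}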
class VirtualMemory {
public:
// Reserves virtual memory with size.
explicit VirtualMemory(size_t size);
~VirtualMemory();
// Returns whether the memory has been reserved.
bool IsReserved();
// Returns the start address of the reserved memory.
void* address() {
ASSERT(IsReserved());
return address_;
}
// Returns the size of the reserved memory.
size_t size() { return size_; }
// Commits real memory. Returns whether the operation succeeded.
bool Commit(void* address, size_t size, bool is_executable);
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);
private:
void* address_; // Start address of the virtual memory.
size_t size_; // Size of the virtual memory.
};
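
// A minimal sketch (illustration only, not part of the original header) of the
// reserve-then-commit pattern VirtualMemory supports. The helper name
// ReserveAndCommit is hypothetical.
static inline VirtualMemory* ReserveAndCommit(size_t size) {
  VirtualMemory* reservation = new VirtualMemory(size);
  if (!reservation->IsReserved() ||
      !reservation->Commit(reservation->address(), size, false)) {
    // Either the reservation or the commit failed; give the range back.
    delete reservation;
    return NULL;
  }
  return reservation;  // Caller owns the mapping; ~VirtualMemory releases it.
}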
// ----------------------------------------------------------------------------
// ThreadHandle
//
// A ThreadHandle represents a thread identifier. The ThreadHandle does not
// own the underlying OS handle. Thread handles can be used for referring to
// threads and for testing equality.
class ThreadHandle {
public:
enum Kind { SELF, INVALID };
explicit ThreadHandle(Kind kind);
// Destructor.
~ThreadHandle();
// Test for thread running.
bool IsSelf() const;
// Test for valid thread handle.
bool IsValid() const;
// Get platform-specific data.
class PlatformData;
PlatformData* thread_handle_data() { return data_; }
// Initialize the handle to the given kind.
void Initialize(Kind kind);
private:
PlatformData* data_; // Captures platform dependent data.
};
// ----------------------------------------------------------------------------
// Thread
//
// Thread objects are used for creating and running threads. When the Start()
// method is called, the new thread begins executing the Run() method in the
// new thread. The Thread object should not be deallocated before the thread
// has terminated.
class Thread: public ThreadHandle {
public:
// Opaque data type for thread-local storage keys.
enum LocalStorageKey {};
// Create new thread.
Thread();
virtual ~Thread();
// Start new thread by calling the Run() method in the new thread.
void Start();
// Wait until thread terminates.
void Join();
// Abstract method for run handler.
virtual void Run() = 0;
// Thread-local storage.
static LocalStorageKey CreateThreadLocalKey();
static void DeleteThreadLocalKey(LocalStorageKey key);
static void* GetThreadLocal(LocalStorageKey key);
static int GetThreadLocalInt(LocalStorageKey key) {
return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
}
static void SetThreadLocal(LocalStorageKey key, void* value);
static void SetThreadLocalInt(LocalStorageKey key, int value) {
SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
}
static bool HasThreadLocal(LocalStorageKey key) {
return GetThreadLocal(key) != NULL;
}
// A hint to the scheduler to let another thread run.
static void YieldCPU();
private:
class PlatformData;
PlatformData* data_;
DISALLOW_COPY_AND_ASSIGN(Thread);
};
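
// A minimal sketch (illustration only, not part of the original header): the
// smallest useful Thread subclass. Start() runs Run() on the new thread and
// Join() waits for it. The class name CounterThread is hypothetical.
class CounterThread : public Thread {
 public:
  CounterThread() : count_(0) { }
  virtual void Run() {
    for (int i = 0; i < 1000; i++) count_++;
  }
  int count() const { return count_; }
 private:
  int count_;
};
// Usage sketch:  CounterThread t;  t.Start();  t.Join();  // t.count() == 1000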
// ----------------------------------------------------------------------------
// Mutex
//
// Mutexes are used for serializing access to non-reentrant sections of code.
// The implementation of a mutex should allow for nested/recursive locking.
class Mutex {
public:
virtual ~Mutex() {}
// Locks the given mutex. If the mutex is currently unlocked, it becomes
// locked and owned by the calling thread, and the call returns immediately.
// If the mutex is already locked by another thread, the call suspends the
// calling thread until the mutex is unlocked.
virtual int Lock() = 0;
// Unlocks the given mutex. The mutex is assumed to be locked and owned by
// the calling thread on entrance.
virtual int Unlock() = 0;
};
// ----------------------------------------------------------------------------
// ScopedLock
//
// Stack-allocated ScopedLocks provide block-scoped locking and unlocking
// of a mutex.
class ScopedLock {
public:
explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
mutex_->Lock();
}
~ScopedLock() {
mutex_->Unlock();
}
private:
Mutex* mutex_;
DISALLOW_COPY_AND_ASSIGN(ScopedLock);
};
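
// A minimal sketch (illustration only, not part of the original header):
// pairing OS::CreateMutex() with a stack-allocated ScopedLock so the mutex is
// released on every return path. The names counter_mutex, shared_counter and
// IncrementSharedCounter are hypothetical.
static Mutex* counter_mutex = OS::CreateMutex();
static int shared_counter = 0;
static inline void IncrementSharedCounter() {
  ScopedLock lock(counter_mutex);  // Unlocked automatically by ~ScopedLock.
  shared_counter++;
}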
// ----------------------------------------------------------------------------
// Semaphore
//
// A semaphore object is a synchronization object that maintains a count. The
// count is decremented each time a thread completes a wait for the semaphore
// object and incremented each time a thread signals the semaphore. When the
// count reaches zero, threads waiting for the semaphore block until the
// count becomes non-zero.
class Semaphore {
public:
virtual ~Semaphore() {}
// Suspends the calling thread until the semaphore counter is non-zero
// and then decrements the semaphore counter.
virtual void Wait() = 0;
// Suspends the calling thread until the counter is non-zero or the timeout
// has passed. If the timeout occurs, the return value is false and the
// counter is unchanged. Otherwise the semaphore counter is decremented and
// true is returned. The timeout value is specified in microseconds.
virtual bool Wait(int timeout) = 0;
// Increments the semaphore counter.
virtual void Signal() = 0;
};
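
// A minimal sketch (illustration only, not part of the original header): a
// semaphore created with an initial count of zero makes a consumer block until
// a producer signals. The names work_ready, ProduceWorkItem and
// ConsumeWorkItem are hypothetical; a real queue would also guard its shared
// state with a Mutex.
static Semaphore* work_ready = OS::CreateSemaphore(0);
static inline void ProduceWorkItem() {
  work_ready->Signal();  // Increment the counter, waking one waiter.
}
static inline void ConsumeWorkItem() {
  work_ready->Wait();    // Block until the counter is non-zero, then decrement.
}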
// ----------------------------------------------------------------------------
// Socket
//
class Socket {
public:
virtual ~Socket() {}
// Server initialization.
virtual bool Bind(const int port) = 0;
virtual bool Listen(int backlog) const = 0;
virtual Socket* Accept() const = 0;
// Client initialization.
virtual bool Connect(const char* host, const char* port) = 0;
// Shutdown socket for both read and write. This causes blocking Send and
// Receive calls to exit. After Shutdown the Socket object cannot be used for
// any communication.
virtual bool Shutdown() = 0;
// Data transmission.
virtual int Send(const char* data, int len) const = 0;
virtual int Receive(char* data, int len) const = 0;
// Set the value of the SO_REUSEADDR socket option.
virtual bool SetReuseAddress(bool reuse_address) = 0;
virtual bool IsValid() const = 0;
static bool Setup();
static int LastError();
static uint16_t HToN(uint16_t value);
static uint16_t NToH(uint16_t value);
static uint32_t HToN(uint32_t value);
static uint32_t NToH(uint32_t value);
};
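
// A hedged sketch (illustration only, not part of the original header) of a
// one-shot echo server built from the Socket interface above. The function
// name EchoOnce is hypothetical.
static inline void EchoOnce(int port) {
  if (!Socket::Setup()) return;          // Platform-level socket initialization.
  Socket* server = OS::CreateSocket();
  if (server->Bind(port) && server->Listen(1)) {
    Socket* client = server->Accept();   // Blocks until a client connects.
    if (client != NULL) {
      char buffer[256];
      int received = client->Receive(buffer, static_cast<int>(sizeof(buffer)));
      if (received > 0) client->Send(buffer, received);
      delete client;
    }
  }
  delete server;
}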
// ----------------------------------------------------------------------------
// Sampler
//
// A sampler periodically samples the state of the VM and optionally
// (if used for profiling) the program counter and stack pointer for
// the thread that created it.
// TickSample captures the information collected for each sample.
class TickSample {
public:
TickSample()
: state(OTHER),
pc(NULL),
sp(NULL),
fp(NULL),
function(NULL),
frames_count(0) {}
StateTag state; // The state of the VM.
Address pc; // Instruction pointer.
Address sp; // Stack pointer.
Address fp; // Frame pointer.
Address function; // The last called JS function.
static const int kMaxFramesCount = 64;
Address stack[kMaxFramesCount]; // Call stack.
int frames_count; // Number of captured frames.
};
#ifdef ENABLE_LOGGING_AND_PROFILING
class Sampler {
public:
// Initialize sampler.
explicit Sampler(int interval, bool profiling);
virtual ~Sampler();
// Performs stack sampling.
virtual void SampleStack(TickSample* sample) = 0;
// This method is called for each sampling period with the current
// program counter.
virtual void Tick(TickSample* sample) = 0;
// Start and stop sampler.
void Start();
void Stop();
// Is the sampler used for profiling.
inline bool IsProfiling() { return profiling_; }
// Whether the sampler is running (that is, consumes resources).
inline bool IsActive() { return active_; }
class PlatformData;
private:
const int interval_;
const bool profiling_;
bool active_;
PlatformData* data_; // Platform specific data.
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
};
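
// A minimal sketch (illustration only, not part of the original header): a
// Sampler subclass that only counts ticks. Real samplers also fill in pc/sp/fp
// in SampleStack when profiling. The class name CountingSampler is
// hypothetical.
class CountingSampler : public Sampler {
 public:
  CountingSampler() : Sampler(1, false), ticks_(0) { }  // 1 ms, not profiling.
  virtual void SampleStack(TickSample* sample) { sample->frames_count = 0; }
  virtual void Tick(TickSample* sample) { ticks_++; }
  int ticks() const { return ticks_; }
 private:
  int ticks_;
};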
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
#endif // V8_PLATFORM_H_

18
deps/v8/src/profile-generator-inl.h

@ -97,13 +97,6 @@ void CodeMap::DeleteCode(Address addr) {
} }
bool CpuProfilesCollection::is_last_profile() {
// Called from VM thread, and only it can mutate the list,
// so no locking is needed here.
return current_profiles_.length() == 1;
}
const char* CpuProfilesCollection::GetFunctionName(String* name) { const char* CpuProfilesCollection::GetFunctionName(String* name) {
return GetFunctionName(GetName(name)); return GetFunctionName(GetName(name));
} }
@ -130,17 +123,6 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
} }
} }
template<class Visitor>
void HeapEntriesMap::Apply(Visitor* visitor) {
for (HashMap::Entry* p = entries_.Start();
p != NULL;
p = entries_.Next(p)) {
if (!IsAlias(p->value))
visitor->Apply(reinterpret_cast<HeapEntry*>(p->value));
}
}
} } // namespace v8::internal } } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING #endif // ENABLE_LOGGING_AND_PROFILING

1234
deps/v8/src/profile-generator.cc

File diff suppressed because it is too large

491
deps/v8/src/profile-generator.h

@ -279,15 +279,12 @@ class CpuProfilesCollection {
CpuProfile* StopProfiling(int security_token_id, CpuProfile* StopProfiling(int security_token_id,
const char* title, const char* title,
double actual_sampling_rate); double actual_sampling_rate);
CpuProfile* StopProfiling(int security_token_id,
String* title,
double actual_sampling_rate);
List<CpuProfile*>* Profiles(int security_token_id); List<CpuProfile*>* Profiles(int security_token_id);
const char* GetName(String* name) { const char* GetName(String* name) {
return function_and_resource_names_.GetName(name); return function_and_resource_names_.GetName(name);
} }
CpuProfile* GetProfile(int security_token_id, unsigned uid); CpuProfile* GetProfile(int security_token_id, unsigned uid);
inline bool is_last_profile(); bool IsLastProfile(const char* title);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
String* name, String* resource_name, int line_number); String* name, String* resource_name, int line_number);
@ -423,167 +420,194 @@ class ProfileGenerator {
}; };
class HeapSnapshot;
class HeapEntry; class HeapEntry;
class HeapGraphEdge BASE_EMBEDDED {
class HeapGraphEdge {
public: public:
enum Type { enum Type {
CONTEXT_VARIABLE = v8::HeapGraphEdge::CONTEXT_VARIABLE, kContextVariable = v8::HeapGraphEdge::kContextVariable,
ELEMENT = v8::HeapGraphEdge::ELEMENT, kElement = v8::HeapGraphEdge::kElement,
PROPERTY = v8::HeapGraphEdge::PROPERTY, kProperty = v8::HeapGraphEdge::kProperty,
INTERNAL = v8::HeapGraphEdge::INTERNAL kInternal = v8::HeapGraphEdge::kInternal
}; };
HeapGraphEdge(Type type, const char* name, HeapEntry* from, HeapEntry* to); HeapGraphEdge() { }
HeapGraphEdge(int index, HeapEntry* from, HeapEntry* to); void Init(int child_index, Type type, const char* name, HeapEntry* to);
void Init(int child_index, int index, HeapEntry* to);
Type type() const { return type_; } Type type() { return static_cast<Type>(type_); }
int index() const { int index() {
ASSERT(type_ == ELEMENT); ASSERT(type_ == kElement);
return index_; return index_;
} }
const char* name() const { const char* name() {
ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY || type_ == INTERNAL); ASSERT(type_ == kContextVariable
|| type_ == kProperty
|| type_ == kInternal);
return name_; return name_;
} }
HeapEntry* from() const { return from_; } HeapEntry* to() { return to_; }
HeapEntry* to() const { return to_; }
HeapEntry* From();
private: private:
Type type_; int child_index_ : 30;
unsigned type_ : 2;
union { union {
int index_; int index_;
const char* name_; const char* name_;
}; };
HeapEntry* from_;
HeapEntry* to_; HeapEntry* to_;
DISALLOW_COPY_AND_ASSIGN(HeapGraphEdge); DISALLOW_COPY_AND_ASSIGN(HeapGraphEdge);
}; };
class HeapGraphPath;
class CachedHeapGraphPath; class CachedHeapGraphPath;
class HeapGraphPath;
class HeapSnapshot;
class HeapEntry { // HeapEntry instances represent an entity from the heap (or a special
// virtual node, e.g. root). To make heap snapshots more compact,
// HeapEntries have a special memory layout (no Vectors or Lists used):
//
// +-----------------+
// HeapEntry
// +-----------------+
// HeapGraphEdge |
// ... } children_count
// HeapGraphEdge |
// +-----------------+
// HeapGraphEdge* |
// ... } retainers_count
// HeapGraphEdge* |
// +-----------------+
//
// In a HeapSnapshot, all entries are hand-allocated in a contiguous array
// of raw bytes.
//
class HeapEntry BASE_EMBEDDED {
public: public:
enum Type { enum Type {
INTERNAL = v8::HeapGraphNode::INTERNAL, kInternal = v8::HeapGraphNode::kInternal,
ARRAY = v8::HeapGraphNode::ARRAY, kArray = v8::HeapGraphNode::kArray,
STRING = v8::HeapGraphNode::STRING, kString = v8::HeapGraphNode::kString,
OBJECT = v8::HeapGraphNode::OBJECT, kObject = v8::HeapGraphNode::kObject,
CODE = v8::HeapGraphNode::CODE, kCode = v8::HeapGraphNode::kCode,
CLOSURE = v8::HeapGraphNode::CLOSURE kClosure = v8::HeapGraphNode::kClosure
}; };
explicit HeapEntry(HeapSnapshot* snapshot) HeapEntry() { }
: snapshot_(snapshot), void Init(HeapSnapshot* snapshot, int children_count, int retainers_count);
visited_(false), void Init(HeapSnapshot* snapshot,
type_(INTERNAL),
name_(""),
id_(0),
next_auto_index_(0),
self_size_(0),
security_token_id_(TokenEnumerator::kNoSecurityToken),
children_(1),
retainers_(0),
retaining_paths_(0),
total_size_(kUnknownSize),
non_shared_total_size_(kUnknownSize),
painted_(kUnpainted) { }
HeapEntry(HeapSnapshot* snapshot,
Type type, Type type,
const char* name, const char* name,
uint64_t id, uint64_t id,
int self_size, int self_size,
int security_token_id) int children_count,
: snapshot_(snapshot), int retainers_count);
visited_(false),
type_(type), HeapSnapshot* snapshot() { return snapshot_; }
name_(name), Type type() { return static_cast<Type>(type_); }
id_(id), const char* name() { return name_; }
next_auto_index_(1), uint64_t id() { return id_; }
self_size_(self_size), int self_size() { return self_size_; }
security_token_id_(security_token_id),
children_(4), Vector<HeapGraphEdge> children() {
retainers_(4), return Vector<HeapGraphEdge>(children_arr(), children_count_); }
retaining_paths_(4), Vector<HeapGraphEdge*> retainers() {
total_size_(kUnknownSize), return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
non_shared_total_size_(kUnknownSize), List<HeapGraphPath*>* GetRetainingPaths();
painted_(kUnpainted) { }
~HeapEntry(); void clear_paint() { painted_ = kUnpainted; }
bool painted_reachable() { return painted_ == kPainted; }
bool visited() const { return visited_; } void paint_reachable() {
Type type() const { return type_; } ASSERT(painted_ == kUnpainted);
const char* name() const { return name_; } painted_ = kPainted;
uint64_t id() const { return id_; } }
int self_size() const { return self_size_; }
int security_token_id() const { return security_token_id_; }
bool painted_reachable() { return painted_ == kPaintReachable; }
bool not_painted_reachable_from_others() { bool not_painted_reachable_from_others() {
return painted_ != kPaintReachableFromOthers; return painted_ != kPaintedReachableFromOthers;
}
void paint_reachable_from_others() {
painted_ = kPaintedReachableFromOthers;
} }
const List<HeapGraphEdge*>* children() const { return &children_; }
const List<HeapGraphEdge*>* retainers() const { return &retainers_; }
const List<HeapGraphPath*>* GetRetainingPaths();
template<class Visitor> template<class Visitor>
void ApplyAndPaintAllReachable(Visitor* visitor); void ApplyAndPaintAllReachable(Visitor* visitor);
void ClearPaint() { painted_ = kUnpainted; }
void CutEdges();
void MarkAsVisited() { visited_ = true; }
void PaintAllReachable(); void PaintAllReachable();
void PaintReachable() {
ASSERT(painted_ == kUnpainted);
painted_ = kPaintReachable;
}
void PaintReachableFromOthers() { painted_ = kPaintReachableFromOthers; }
void SetClosureReference(const char* name, HeapEntry* entry);
void SetElementReference(int index, HeapEntry* entry);
void SetInternalReference(const char* name, HeapEntry* entry);
void SetPropertyReference(const char* name, HeapEntry* entry);
void SetAutoIndexReference(HeapEntry* entry);
void SetUnidirAutoIndexReference(HeapEntry* entry);
int TotalSize(); void SetElementReference(
int NonSharedTotalSize(); int child_index, int index, HeapEntry* entry, int retainer_index);
void SetNamedReference(HeapGraphEdge::Type type,
int child_index,
const char* name,
HeapEntry* entry,
int retainer_index);
void SetUnidirElementReference(int child_index, int index, HeapEntry* entry);
int EntrySize() { return EntriesSize(1, children_count_, retainers_count_); }
int ReachableSize();
int RetainedSize();
void Print(int max_depth, int indent); void Print(int max_depth, int indent);
private: static int EntriesSize(int entries_count,
void AddEdge(HeapGraphEdge* edge); int children_count,
int CalculateTotalSize(); int retainers_count);
int CalculateNonSharedTotalSize();
void FindRetainingPaths(HeapEntry* node, CachedHeapGraphPath* prev_path);
void RemoveChild(HeapGraphEdge* edge);
void RemoveRetainer(HeapGraphEdge* edge);
private:
HeapGraphEdge* children_arr() {
return reinterpret_cast<HeapGraphEdge*>(this + 1);
}
HeapGraphEdge** retainers_arr() {
return reinterpret_cast<HeapGraphEdge**>(children_arr() + children_count_);
}
const char* TypeAsString(); const char* TypeAsString();
unsigned painted_: 2;
unsigned type_: 3;
// The calculated data is stored in HeapSnapshot in HeapEntryCalculatedData
// entries. See AddCalculatedData and GetCalculatedData.
int calculated_data_index_: 27;
int self_size_;
int children_count_;
int retainers_count_;
HeapSnapshot* snapshot_; HeapSnapshot* snapshot_;
bool visited_;
Type type_;
const char* name_; const char* name_;
uint64_t id_; uint64_t id_;
int next_auto_index_;
int self_size_; static const unsigned kUnpainted = 0;
int security_token_id_; static const unsigned kPainted = 1;
List<HeapGraphEdge*> children_; static const unsigned kPaintedReachableFromOthers = 2;
List<HeapGraphEdge*> retainers_; static const int kNoCalculatedData = -1;
List<HeapGraphPath*> retaining_paths_;
int total_size_; DISALLOW_COPY_AND_ASSIGN(HeapEntry);
int non_shared_total_size_; };
int painted_;
class HeapEntryCalculatedData {
public:
HeapEntryCalculatedData()
: retaining_paths_(NULL),
reachable_size_(kUnknownSize),
retained_size_(kUnknownSize) {
}
void Dispose();
List<HeapGraphPath*>* GetRetainingPaths(HeapEntry* entry);
int ReachableSize(HeapEntry* entry);
int RetainedSize(HeapEntry* entry);
private:
void CalculateSizes(HeapEntry* entry);
void FindRetainingPaths(HeapEntry* entry, CachedHeapGraphPath* prev_path);
List<HeapGraphPath*>* retaining_paths_;
int reachable_size_;
int retained_size_;
static const int kUnknownSize = -1; static const int kUnknownSize = -1;
static const int kUnpainted = 0;
static const int kPaintReachable = 1;
static const int kPaintReachableFromOthers = 2;
DISALLOW_IMPLICIT_CONSTRUCTORS(HeapEntry); // Allow generated copy constructor and assignment operator.
}; };
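
// A sketch drawn from the memory-layout diagram above (an assumption, not
// code from this diff; the real EntriesSize lives in profile-generator.cc,
// which is suppressed here): an entry's raw footprint is its own size plus
// its trailing edge and retainer-pointer arrays, which children_arr() and
// retainers_arr() recover by pointer arithmetic on `this + 1`.
static int EntriesSizeSketch(int entries_count,
                             int children_count,
                             int retainers_count) {
  return static_cast<int>(entries_count * sizeof(HeapEntry)
                          + children_count * sizeof(HeapGraphEdge)
                          + retainers_count * sizeof(HeapGraphEdge*));
}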
@ -595,7 +619,7 @@ class HeapGraphPath {
void Add(HeapGraphEdge* edge) { path_.Add(edge); } void Add(HeapGraphEdge* edge) { path_.Add(edge); }
void Set(int index, HeapGraphEdge* edge) { path_[index] = edge; } void Set(int index, HeapGraphEdge* edge) { path_[index] = edge; }
const List<HeapGraphEdge*>* path() const { return &path_; } const List<HeapGraphEdge*>* path() { return &path_; }
void Print(); void Print();
@ -606,39 +630,6 @@ class HeapGraphPath {
}; };
class HeapEntriesMap {
public:
HeapEntriesMap();
~HeapEntriesMap();
void Alias(HeapObject* object, HeapEntry* entry);
void Apply(void (HeapEntry::*Func)(void));
template<class Visitor>
void Apply(Visitor* visitor);
HeapEntry* Map(HeapObject* object);
void Pair(HeapObject* object, HeapEntry* entry);
uint32_t capacity() { return entries_.capacity(); }
private:
INLINE(uint32_t Hash(HeapObject* object)) {
return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
}
INLINE(static bool HeapObjectsMatch(void* key1, void* key2)) {
return key1 == key2;
}
INLINE(bool IsAlias(void* ptr)) {
return reinterpret_cast<intptr_t>(ptr) & kAliasTag;
}
static const intptr_t kAliasTag = 1;
HashMap entries_;
DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
};
class HeapSnapshotsCollection; class HeapSnapshotsCollection;
class HeapSnapshotsDiff; class HeapSnapshotsDiff;
@ -653,53 +644,52 @@ class HeapSnapshot {
const char* title, const char* title,
unsigned uid); unsigned uid);
~HeapSnapshot(); ~HeapSnapshot();
void ClearPaint();
void CutObjectsFromForeignSecurityContexts();
HeapEntry* GetEntry(Object* object);
void SetClosureReference(
HeapEntry* parent, String* reference_name, Object* child);
void SetElementReference(HeapEntry* parent, int index, Object* child);
void SetInternalReference(
HeapEntry* parent, const char* reference_name, Object* child);
void SetPropertyReference(
HeapEntry* parent, String* reference_name, Object* child);
INLINE(const char* title() const) { return title_; } HeapSnapshotsCollection* collection() { return collection_; }
INLINE(unsigned uid() const) { return uid_; } const char* title() { return title_; }
const HeapEntry* const_root() const { return &root_; } unsigned uid() { return uid_; }
HeapEntry* root() { return &root_; } HeapEntry* root() { return entries_[root_entry_index_]; }
template<class Visitor>
void IterateEntries(Visitor* visitor) { entries_.Apply(visitor); } void AllocateEntries(
List<HeapEntry*>* GetSortedEntriesList(); int entries_count, int children_count, int retainers_count);
HeapEntry* AddEntry(
HeapObject* object, int children_count, int retainers_count);
bool WillAddEntry(HeapObject* object);
int AddCalculatedData();
HeapEntryCalculatedData& GetCalculatedData(int index) {
return calculated_data_[index];
}
void ClearPaint();
HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot); HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot);
List<HeapEntry*>* GetSortedEntriesList();
template<class Visitor>
void IterateEntries(Visitor* visitor) { entries_.Iterate(visitor); }
void Print(int max_depth); void Print(int max_depth);
void PrintEntriesSize();
static HeapObject *const kInternalRootObject;
private: private:
HeapEntry* AddEntry(HeapObject* object, HeapEntry::Type type) { HeapEntry* AddEntry(HeapObject* object,
return AddEntry(object, type, ""); HeapEntry::Type type,
} const char* name,
HeapEntry* AddEntry( int children_count,
HeapObject* object, HeapEntry::Type type, const char* name); int retainers_count);
void AddEntryAlias(HeapObject* object, HeapEntry* entry) { HeapEntry* GetNextEntryToInit();
entries_.Alias(object, entry);
}
HeapEntry* FindEntry(HeapObject* object) {
return entries_.Map(object);
}
int GetGlobalSecurityToken();
int GetObjectSecurityToken(HeapObject* obj);
static int GetObjectSize(HeapObject* obj); static int GetObjectSize(HeapObject* obj);
static int CalculateNetworkSize(JSObject* obj); static int CalculateNetworkSize(JSObject* obj);
HeapSnapshotsCollection* collection_; HeapSnapshotsCollection* collection_;
const char* title_; const char* title_;
unsigned uid_; unsigned uid_;
HeapEntry root_; int root_entry_index_;
// Mapping from HeapObject* pointers to HeapEntry* pointers. char* raw_entries_;
HeapEntriesMap entries_; List<HeapEntry*> entries_;
// Entries sorted by id. bool entries_sorted_;
List<HeapEntry*>* sorted_entries_; List<HeapEntryCalculatedData> calculated_data_;
friend class HeapSnapshotTester;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshot); DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
}; };
@ -748,30 +738,36 @@ class HeapSnapshotsDiff {
HeapSnapshotsDiff(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2) HeapSnapshotsDiff(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2)
: snapshot1_(snapshot1), : snapshot1_(snapshot1),
snapshot2_(snapshot2), snapshot2_(snapshot2),
additions_root_(new HeapEntry(snapshot2)), raw_additions_root_(NULL),
deletions_root_(new HeapEntry(snapshot1)) { } raw_deletions_root_(NULL) { }
~HeapSnapshotsDiff() { ~HeapSnapshotsDiff() {
delete deletions_root_; DeleteArray(raw_deletions_root_);
delete additions_root_; DeleteArray(raw_additions_root_);
} }
void AddAddedEntry(HeapEntry* entry) { void AddAddedEntry(int child_index, int index, HeapEntry* entry) {
additions_root_->SetUnidirAutoIndexReference(entry); additions_root()->SetUnidirElementReference(child_index, index, entry);
} }
void AddDeletedEntry(HeapEntry* entry) { void AddDeletedEntry(int child_index, int index, HeapEntry* entry) {
deletions_root_->SetUnidirAutoIndexReference(entry); deletions_root()->SetUnidirElementReference(child_index, index, entry);
} }
const HeapEntry* additions_root() const { return additions_root_; } void CreateRoots(int additions_count, int deletions_count);
const HeapEntry* deletions_root() const { return deletions_root_; }
HeapEntry* additions_root() {
return reinterpret_cast<HeapEntry*>(raw_additions_root_);
}
HeapEntry* deletions_root() {
return reinterpret_cast<HeapEntry*>(raw_deletions_root_);
}
private: private:
HeapSnapshot* snapshot1_; HeapSnapshot* snapshot1_;
HeapSnapshot* snapshot2_; HeapSnapshot* snapshot2_;
HeapEntry* additions_root_; char* raw_additions_root_;
HeapEntry* deletions_root_; char* raw_deletions_root_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsDiff); DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsDiff);
}; };
@ -830,18 +826,123 @@ class HeapSnapshotsCollection {
}; };
// The HeapEntriesMap instance is used to track a mapping between
// real heap objects and their representations in heap snapshots.
class HeapEntriesMap {
public:
HeapEntriesMap();
~HeapEntriesMap();
// Aliasing is used for skipping intermediate proxy objects, like
// JSGlobalPropertyCell.
void Alias(HeapObject* from, HeapObject* to);
HeapEntry* Map(HeapObject* object);
void Pair(HeapObject* object, HeapEntry* entry);
void CountReference(HeapObject* from, HeapObject* to,
int* prev_children_count = NULL,
int* prev_retainers_count = NULL);
template<class Visitor>
void UpdateEntries(Visitor* visitor);
int entries_count() { return entries_count_; }
int total_children_count() { return total_children_count_; }
int total_retainers_count() { return total_retainers_count_; }
private:
struct EntryInfo {
explicit EntryInfo(HeapEntry* entry)
: entry(entry), children_count(0), retainers_count(0) { }
HeapEntry* entry;
int children_count;
int retainers_count;
};
uint32_t Hash(HeapObject* object) {
return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
}
static bool HeapObjectsMatch(void* key1, void* key2) { return key1 == key2; }
bool IsAlias(void* ptr) {
return reinterpret_cast<intptr_t>(ptr) & kAliasTag;
}
void* MakeAlias(void* ptr) {
return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(ptr) | kAliasTag);
}
void* Unalias(void* ptr) {
return reinterpret_cast<void*>(
reinterpret_cast<intptr_t>(ptr) & (~kAliasTag));
}
HashMap entries_;
int entries_count_;
int total_children_count_;
int total_retainers_count_;
static const intptr_t kAliasTag = 1;
DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
};
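
// A sketch (illustration only, hypothetical free functions) of the
// pointer-tagging idea behind MakeAlias/IsAlias/Unalias above: HeapEntry*
// values are word-aligned, so bit 0 of the HashMap value slot is free to mark
// entries that are aliases of another object's entry.
static inline void* TagAsAlias(void* entry) {
  return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(entry) | 1);
}
static inline bool TaggedIsAlias(void* value) {
  return (reinterpret_cast<intptr_t>(value) & 1) != 0;
}
static inline void* StripAliasTag(void* value) {
  return reinterpret_cast<void*>(
      reinterpret_cast<intptr_t>(value) & ~static_cast<intptr_t>(1));
}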
class HeapSnapshotGenerator { class HeapSnapshotGenerator {
public: public:
class SnapshotFillerInterface {
public:
virtual ~SnapshotFillerInterface() { }
virtual HeapEntry* AddEntry(HeapObject* obj) = 0;
virtual void SetElementReference(HeapObject* parent_obj,
HeapEntry* parent_entry,
int index,
Object* child_obj,
HeapEntry* child_entry) = 0;
virtual void SetNamedReference(HeapGraphEdge::Type type,
HeapObject* parent_obj,
HeapEntry* parent_entry,
const char* reference_name,
Object* child_obj,
HeapEntry* child_entry) = 0;
virtual void SetRootReference(Object* child_obj,
HeapEntry* child_entry) = 0;
static HeapEntry *const kHeapEntryPlaceholder;
};
explicit HeapSnapshotGenerator(HeapSnapshot* snapshot); explicit HeapSnapshotGenerator(HeapSnapshot* snapshot);
void GenerateSnapshot(); void GenerateSnapshot();
private: private:
HeapEntry* GetEntry(Object* obj);
int GetGlobalSecurityToken();
int GetObjectSecurityToken(HeapObject* obj);
void ExtractReferences(HeapObject* obj); void ExtractReferences(HeapObject* obj);
void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry); void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry); void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry); void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
void SetClosureReference(HeapObject* parent_obj,
HeapEntry* parent,
String* reference_name,
Object* child);
void SetElementReference(HeapObject* parent_obj,
HeapEntry* parent,
int index,
Object* child);
void SetInternalReference(HeapObject* parent_obj,
HeapEntry* parent,
const char* reference_name,
Object* child);
void SetPropertyReference(HeapObject* parent_obj,
HeapEntry* parent,
String* reference_name,
Object* child);
void SetRootReference(Object* child);
HeapSnapshot* snapshot_; HeapSnapshot* snapshot_;
HeapSnapshotsCollection* collection_;
// Mapping from HeapObject* pointers to HeapEntry* pointers.
HeapEntriesMap entries_;
SnapshotFillerInterface* filler_;
friend class IndexedReferencesExtractor;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator); DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
}; };

6
deps/v8/src/property.h

@ -115,8 +115,8 @@ class MapTransitionDescriptor: public Descriptor {
// the same CONSTANT_FUNCTION field. // the same CONSTANT_FUNCTION field.
class ConstTransitionDescriptor: public Descriptor { class ConstTransitionDescriptor: public Descriptor {
public: public:
explicit ConstTransitionDescriptor(String* key) explicit ConstTransitionDescriptor(String* key, Map* map)
: Descriptor(key, Smi::FromInt(0), NONE, CONSTANT_TRANSITION) { } : Descriptor(key, map, NONE, CONSTANT_TRANSITION) { }
}; };
@ -260,7 +260,7 @@ class LookupResult BASE_EMBEDDED {
Map* GetTransitionMap() { Map* GetTransitionMap() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE); ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
ASSERT(type() == MAP_TRANSITION); ASSERT(type() == MAP_TRANSITION || type() == CONSTANT_TRANSITION);
return Map::cast(GetValue()); return Map::cast(GetValue());
} }

16
deps/v8/src/runtime.cc

@ -305,13 +305,14 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
} }
Handle<Object> result; Handle<Object> result;
uint32_t element_index = 0; uint32_t element_index = 0;
if (key->ToArrayIndex(&element_index)) { if (key->IsSymbol()) {
// Array index (uint32). // If key is a symbol it is not an array element.
result = SetElement(boilerplate, element_index, value);
} else if (key->IsSymbol()) {
// The key is not an array index.
Handle<String> name(String::cast(*key)); Handle<String> name(String::cast(*key));
ASSERT(!name->AsArrayIndex(&element_index));
result = SetProperty(boilerplate, name, value, NONE); result = SetProperty(boilerplate, name, value, NONE);
} else if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
result = SetElement(boilerplate, element_index, value);
} else { } else {
// Non-uint32 number. // Non-uint32 number.
ASSERT(key->IsNumber()); ASSERT(key->IsNumber());
@ -1626,7 +1627,8 @@ static Object* Runtime_SetCode(Arguments args) {
} }
// Set the code, scope info, formal parameter count, // Set the code, scope info, formal parameter count,
// and the length of the target function. // and the length of the target function.
target->set_code(fun->code()); target->shared()->set_code(shared->code());
target->set_code(shared->code());
target->shared()->set_scope_info(shared->scope_info()); target->shared()->set_scope_info(shared->scope_info());
target->shared()->set_length(shared->length()); target->shared()->set_length(shared->length());
target->shared()->set_formal_parameter_count( target->shared()->set_formal_parameter_count(
@ -6869,7 +6871,7 @@ static Object* Runtime_LazyCompile(Arguments args) {
Handle<JSFunction> function = args.at<JSFunction>(0); Handle<JSFunction> function = args.at<JSFunction>(0);
#ifdef DEBUG #ifdef DEBUG
if (FLAG_trace_lazy) { if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
PrintF("[lazy: "); PrintF("[lazy: ");
function->shared()->name()->Print(); function->shared()->name()->Print();
PrintF("]\n"); PrintF("]\n");

8
deps/v8/src/runtime.js

@ -175,7 +175,7 @@ function ADD(x) {
// Left operand (this) is already a string. // Left operand (this) is already a string.
function STRING_ADD_LEFT(y) { function STRING_ADD_LEFT(y) {
if (!IS_STRING(y)) { if (!IS_STRING(y)) {
if (IS_STRING_WRAPPER(y)) { if (IS_STRING_WRAPPER(y) && %_IsStringWrapperSafeForDefaultValueOf(y)) {
y = %_ValueOf(y); y = %_ValueOf(y);
} else { } else {
y = IS_NUMBER(y) y = IS_NUMBER(y)
@ -191,7 +191,7 @@ function STRING_ADD_LEFT(y) {
function STRING_ADD_RIGHT(y) { function STRING_ADD_RIGHT(y) {
var x = this; var x = this;
if (!IS_STRING(x)) { if (!IS_STRING(x)) {
if (IS_STRING_WRAPPER(x)) { if (IS_STRING_WRAPPER(x) && %_IsStringWrapperSafeForDefaultValueOf(x)) {
x = %_ValueOf(x); x = %_ValueOf(x);
} else { } else {
x = IS_NUMBER(x) x = IS_NUMBER(x)
@ -387,11 +387,11 @@ function GET_KEYS() {
// Filter a given key against an object by checking if the object // Filter a given key against an object by checking if the object
// has a property with the given key; return the key as a string if // has a property with the given key; return the key as a string if
// it has. Otherwise returns null. Used in for-in statements. // it has. Otherwise returns 0 (smi). Used in for-in statements.
function FILTER_KEY(key) { function FILTER_KEY(key) {
var string = %ToString(key); var string = %ToString(key);
if (%HasProperty(this, string)) return string; if (%HasProperty(this, string)) return string;
return null; return 0;
} }

8
deps/v8/src/serialize.cc

@ -680,14 +680,6 @@ void Deserializer::ReadObject(int space_number,
LOG(SnapshotPositionEvent(address, source_->position())); LOG(SnapshotPositionEvent(address, source_->position()));
} }
ReadChunk(current, limit, space_number, address); ReadChunk(current, limit, space_number, address);
if (space == Heap::map_space()) {
ASSERT(size == Map::kSize);
HeapObject* obj = HeapObject::FromAddress(address);
Map* map = reinterpret_cast<Map*>(obj);
map->set_scavenger(Heap::GetScavenger(map->instance_type(),
map->instance_size()));
}
} }

2
deps/v8/src/serialize.h

@ -248,7 +248,7 @@ class SerializerDeserializer: public ObjectVisitor {
} }
static int partial_snapshot_cache_length_; static int partial_snapshot_cache_length_;
static const int kPartialSnapshotCacheCapacity = 1300; static const int kPartialSnapshotCacheCapacity = 1400;
static Object* partial_snapshot_cache_[]; static Object* partial_snapshot_cache_[];
}; };

17
deps/v8/src/stub-cache.cc

@ -789,23 +789,6 @@ Object* StubCache::ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
#endif #endif
Object* StubCache::ComputeLazyCompile(int argc) {
Code::Flags flags =
Code::ComputeFlags(Code::STUB, NOT_IN_LOOP, UNINITIALIZED, NORMAL, argc);
Object* probe = ProbeCache(flags);
if (!probe->IsUndefined()) return probe;
StubCompiler compiler;
Object* result = FillCache(compiler.CompileLazyCompile(flags));
if (result->IsCode()) {
Code* code = Code::cast(result);
USE(code);
PROFILE(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
code, code->arguments_count()));
}
return result;
}
void StubCache::Clear() { void StubCache::Clear() {
for (int i = 0; i < kPrimaryTableSize; i++) { for (int i = 0; i < kPrimaryTableSize; i++) {
primary_[i].key = Heap::empty_string(); primary_[i].key = Heap::empty_string();

3
deps/v8/src/stub-cache.h

@ -210,8 +210,6 @@ class StubCache : public AllStatic {
static Object* ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind); static Object* ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind);
#endif #endif
static Object* ComputeLazyCompile(int argc);
// Update cache for entry hash(name, map). // Update cache for entry hash(name, map).
static Code* Set(String* name, Map* map, Code* code); static Code* Set(String* name, Map* map, Code* code);
@ -357,7 +355,6 @@ class StubCompiler BASE_EMBEDDED {
Object* CompileCallDebugBreak(Code::Flags flags); Object* CompileCallDebugBreak(Code::Flags flags);
Object* CompileCallDebugPrepareStepIn(Code::Flags flags); Object* CompileCallDebugPrepareStepIn(Code::Flags flags);
#endif #endif
Object* CompileLazyCompile(Code::Flags flags);
// Static functions for generating parts of stubs. // Static functions for generating parts of stubs.
static void GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, static void GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,

129
deps/v8/src/third_party/dtoa/dtoa.c

@ -270,25 +270,14 @@ Exactly one of IEEE_8087, IEEE_MC68k, VAX, or IBM should be defined.
typedef union { double d; ULong L[2]; } U; typedef union { double d; ULong L[2]; } U;
#ifdef YES_ALIAS
#define dval(x) x
#ifdef IEEE_8087 #ifdef IEEE_8087
#define word0(x) ((ULong *)&x)[1] #define word0(x) (x).L[1]
#define word1(x) ((ULong *)&x)[0] #define word1(x) (x).L[0]
#else #else
#define word0(x) ((ULong *)&x)[0] #define word0(x) (x).L[0]
#define word1(x) ((ULong *)&x)[1] #define word1(x) (x).L[1]
#endif
#else
#ifdef IEEE_8087
#define word0(x) ((U*)&x)->L[1]
#define word1(x) ((U*)&x)->L[0]
#else
#define word0(x) ((U*)&x)->L[0]
#define word1(x) ((U*)&x)->L[1]
#endif
#define dval(x) ((U*)&x)->d
#endif #endif
#define dval(x) (x).d
/* The following definition of Storeinc is appropriate for MIPS processors. /* The following definition of Storeinc is appropriate for MIPS processors.
* An alternative that might be better on some machines is * An alternative that might be better on some machines is
@ -1108,13 +1097,15 @@ diff
static double static double
ulp ulp
#ifdef KR_headers #ifdef KR_headers
(x) double x; (dx) double dx;
#else #else
(double x) (double dx)
#endif #endif
{ {
register Long L; register Long L;
double a; U x, a;
dval(x) = dx;
L = (word0(x) & Exp_mask) - (P-1)*Exp_msk1; L = (word0(x) & Exp_mask) - (P-1)*Exp_msk1;
#ifndef Avoid_Underflow #ifndef Avoid_Underflow
@ -1157,7 +1148,7 @@ b2d
{ {
ULong *xa, *xa0, w, y, z; ULong *xa, *xa0, w, y, z;
int k; int k;
double d; U d;
#ifdef VAX #ifdef VAX
ULong d0, d1; ULong d0, d1;
#else #else
@ -1220,9 +1211,9 @@ b2d
static Bigint * static Bigint *
d2b d2b
#ifdef KR_headers #ifdef KR_headers
(d, e, bits) double d; int *e, *bits; (dd, e, bits) double dd; int *e, *bits;
#else #else
(double d, int *e, int *bits) (double dd, int *e, int *bits)
#endif #endif
{ {
Bigint *b; Bigint *b;
@ -1236,6 +1227,8 @@ d2b
d0 = word0(d) >> 16 | word0(d) << 16; d0 = word0(d) >> 16 | word0(d) << 16;
d1 = word1(d) >> 16 | word1(d) << 16; d1 = word1(d) >> 16 | word1(d) << 16;
#else #else
U d;
dval(d) = dd;
#define d0 word0(d) #define d0 word0(d)
#define d1 word1(d) #define d1 word1(d)
#endif #endif
@ -1368,7 +1361,7 @@ ratio
(Bigint *a, Bigint *b) (Bigint *a, Bigint *b)
#endif #endif
{ {
double da, db; U da, db;
int k, ka, kb; int k, ka, kb;
dval(da) = b2d(a, &ka); dval(da) = b2d(a, &ka);
@ -1542,7 +1535,8 @@ strtod
int bb2, bb5, bbe, bd2, bd5, bbbits, bs2, c, dsign, int bb2, bb5, bbe, bd2, bd5, bbbits, bs2, c, dsign,
e, e1, esign, i, j, k, nd, nd0, nf, nz, nz0, sign; e, e1, esign, i, j, k, nd, nd0, nf, nz, nz0, sign;
CONST char *s, *s0, *s1; CONST char *s, *s0, *s1;
double aadj, aadj1, adj, rv, rv0; double aadj;
U aadj1, adj, rv, rv0;
Long L; Long L;
ULong y, z; ULong y, z;
Bigint *bb = NULL, *bb1, *bd = NULL, *bd0, *bs = NULL, *delta = NULL; Bigint *bb = NULL, *bb1, *bd = NULL, *bd0, *bs = NULL, *delta = NULL;
@ -2042,12 +2036,12 @@ strtod
} }
if (rounding) { if (rounding) {
if (dsign) { if (dsign) {
adj = 1.; dval(adj) = 1.;
goto apply_adj; goto apply_adj;
} }
} }
else if (!dsign) { else if (!dsign) {
adj = -1.; dval(adj) = -1.;
if (!word1(rv) if (!word1(rv)
&& !(word0(rv) & Frac_mask)) { && !(word0(rv) & Frac_mask)) {
y = word0(rv) & Exp_mask; y = word0(rv) & Exp_mask;
@ -2059,7 +2053,7 @@ strtod
{ {
delta = lshift(delta,Log2P); delta = lshift(delta,Log2P);
if (cmp(delta, bs) <= 0) if (cmp(delta, bs) <= 0)
adj = -0.5; dval(adj) = -0.5;
} }
} }
apply_adj: apply_adj:
@ -2072,26 +2066,26 @@ strtod
if ((word0(rv) & Exp_mask) <= if ((word0(rv) & Exp_mask) <=
P*Exp_msk1) { P*Exp_msk1) {
word0(rv) += P*Exp_msk1; word0(rv) += P*Exp_msk1;
dval(rv) += adj*ulp(dval(rv)); dval(rv) += dval(adj)*ulp(dval(rv));
word0(rv) -= P*Exp_msk1; word0(rv) -= P*Exp_msk1;
} }
else else
#endif /*Sudden_Underflow*/ #endif /*Sudden_Underflow*/
#endif /*Avoid_Underflow*/ #endif /*Avoid_Underflow*/
dval(rv) += adj*ulp(dval(rv)); dval(rv) += dval(adj)*ulp(dval(rv));
} }
break; break;
} }
adj = ratio(delta, bs); dval(adj) = ratio(delta, bs);
if (adj < 1.) if (dval(adj) < 1.)
adj = 1.; dval(adj) = 1.;
if (adj <= 0x7ffffffe) { if (dval(adj) <= 0x7ffffffe) {
/* adj = rounding ? ceil(adj) : floor(adj); */ /* adj = rounding ? ceil(adj) : floor(adj); */
y = adj; y = dval(adj);
if (y != adj) { if (y != dval(adj)) {
if (!((rounding>>1) ^ dsign)) if (!((rounding>>1) ^ dsign))
y++; y++;
adj = y; dval(adj) = y;
} }
} }
#ifdef Avoid_Underflow #ifdef Avoid_Underflow
@ -2101,21 +2095,21 @@ strtod
#ifdef Sudden_Underflow #ifdef Sudden_Underflow
if ((word0(rv) & Exp_mask) <= P*Exp_msk1) { if ((word0(rv) & Exp_mask) <= P*Exp_msk1) {
word0(rv) += P*Exp_msk1; word0(rv) += P*Exp_msk1;
adj *= ulp(dval(rv)); dval(adj) *= ulp(dval(rv));
if (dsign) if (dsign)
dval(rv) += adj; dval(rv) += dval(adj);
else else
dval(rv) -= adj; dval(rv) -= dval(adj);
word0(rv) -= P*Exp_msk1; word0(rv) -= P*Exp_msk1;
goto cont; goto cont;
} }
#endif /*Sudden_Underflow*/ #endif /*Sudden_Underflow*/
#endif /*Avoid_Underflow*/ #endif /*Avoid_Underflow*/
adj *= ulp(dval(rv)); dval(adj) *= ulp(dval(rv));
if (dsign) if (dsign)
dval(rv) += adj; dval(rv) += dval(adj);
else else
dval(rv) -= adj; dval(rv) -= dval(adj);
goto cont; goto cont;
} }
#endif /*Honor_FLT_ROUNDS*/ #endif /*Honor_FLT_ROUNDS*/
@ -2237,14 +2231,14 @@ strtod
} }
if ((aadj = ratio(delta, bs)) <= 2.) { if ((aadj = ratio(delta, bs)) <= 2.) {
if (dsign) if (dsign)
aadj = aadj1 = 1.; aadj = dval(aadj1) = 1.;
else if (word1(rv) || word0(rv) & Bndry_mask) { else if (word1(rv) || word0(rv) & Bndry_mask) {
#ifndef Sudden_Underflow #ifndef Sudden_Underflow
if (word1(rv) == Tiny1 && !word0(rv)) if (word1(rv) == Tiny1 && !word0(rv))
goto undfl; goto undfl;
#endif #endif
aadj = 1.; aadj = 1.;
aadj1 = -1.; dval(aadj1) = -1.;
} }
else { else {
/* special case -- power of FLT_RADIX to be */ /* special case -- power of FLT_RADIX to be */
@ -2254,24 +2248,24 @@ strtod
aadj = 1./FLT_RADIX; aadj = 1./FLT_RADIX;
else else
aadj *= 0.5; aadj *= 0.5;
aadj1 = -aadj; dval(aadj1) = -aadj;
} }
} }
else { else {
aadj *= 0.5; aadj *= 0.5;
aadj1 = dsign ? aadj : -aadj; dval(aadj1) = dsign ? aadj : -aadj;
#ifdef Check_FLT_ROUNDS #ifdef Check_FLT_ROUNDS
switch(Rounding) { switch(Rounding) {
case 2: /* towards +infinity */ case 2: /* towards +infinity */
aadj1 -= 0.5; dval(aadj1) -= 0.5;
break; break;
case 0: /* towards 0 */ case 0: /* towards 0 */
case 3: /* towards -infinity */ case 3: /* towards -infinity */
aadj1 += 0.5; dval(aadj1) += 0.5;
} }
#else #else
if (Flt_Rounds == 0) if (Flt_Rounds == 0)
aadj1 += 0.5; dval(aadj1) += 0.5;
#endif /*Check_FLT_ROUNDS*/ #endif /*Check_FLT_ROUNDS*/
} }
y = word0(rv) & Exp_mask; y = word0(rv) & Exp_mask;
@ -2281,8 +2275,8 @@ strtod
if (y == Exp_msk1*(DBL_MAX_EXP+Bias-1)) { if (y == Exp_msk1*(DBL_MAX_EXP+Bias-1)) {
dval(rv0) = dval(rv); dval(rv0) = dval(rv);
word0(rv) -= P*Exp_msk1; word0(rv) -= P*Exp_msk1;
adj = aadj1 * ulp(dval(rv)); dval(adj) = dval(aadj1) * ulp(dval(rv));
dval(rv) += adj; dval(rv) += dval(adj);
if ((word0(rv) & Exp_mask) >= if ((word0(rv) & Exp_mask) >=
Exp_msk1*(DBL_MAX_EXP+Bias-P)) { Exp_msk1*(DBL_MAX_EXP+Bias-P)) {
if (word0(rv0) == Big0 && word1(rv0) == Big1) if (word0(rv0) == Big0 && word1(rv0) == Big1)
@ -2301,19 +2295,19 @@ strtod
if ((z = aadj) <= 0) if ((z = aadj) <= 0)
z = 1; z = 1;
aadj = z; aadj = z;
aadj1 = dsign ? aadj : -aadj; dval(aadj1) = dsign ? aadj : -aadj;
} }
word0(aadj1) += (2*P+1)*Exp_msk1 - y; word0(aadj1) += (2*P+1)*Exp_msk1 - y;
} }
adj = aadj1 * ulp(dval(rv)); dval(adj) = dval(aadj1) * ulp(dval(rv));
dval(rv) += adj; dval(rv) += dval(adj);
#else #else
#ifdef Sudden_Underflow #ifdef Sudden_Underflow
if ((word0(rv) & Exp_mask) <= P*Exp_msk1) { if ((word0(rv) & Exp_mask) <= P*Exp_msk1) {
dval(rv0) = dval(rv); dval(rv0) = dval(rv);
word0(rv) += P*Exp_msk1; word0(rv) += P*Exp_msk1;
adj = aadj1 * ulp(dval(rv)); dval(adj) = dval(aadj1) * ulp(dval(rv));
dval(rv) += adj; dval(rv) += dval(adj);
#ifdef IBM #ifdef IBM
if ((word0(rv) & Exp_mask) < P*Exp_msk1) if ((word0(rv) & Exp_mask) < P*Exp_msk1)
#else #else
@ -2331,8 +2325,8 @@ strtod
word0(rv) -= P*Exp_msk1; word0(rv) -= P*Exp_msk1;
} }
else { else {
adj = aadj1 * ulp(dval(rv)); dval(adj) = dval(aadj1) * ulp(dval(rv));
dval(rv) += adj; dval(rv) += dval(adj);
} }
#else /*Sudden_Underflow*/ #else /*Sudden_Underflow*/
/* Compute adj so that the IEEE rounding rules will /* Compute adj so that the IEEE rounding rules will
@ -2343,12 +2337,12 @@ strtod
* example: 1.2e-307 . * example: 1.2e-307 .
*/ */
if (y <= (P-1)*Exp_msk1 && aadj > 1.) { if (y <= (P-1)*Exp_msk1 && aadj > 1.) {
aadj1 = (double)(int)(aadj + 0.5); dval(aadj1) = (double)(int)(aadj + 0.5);
if (!dsign) if (!dsign)
aadj1 = -aadj1; dval(aadj1) = -dval(aadj1);
} }
adj = aadj1 * ulp(dval(rv)); dval(adj) = dval(aadj1) * ulp(dval(rv));
dval(rv) += adj; dval(rv) += dval(adj);
#endif /*Sudden_Underflow*/ #endif /*Sudden_Underflow*/
#endif /*Avoid_Underflow*/ #endif /*Avoid_Underflow*/
} }
@ -2638,10 +2632,10 @@ freedtoa(char *s)
char * char *
dtoa dtoa
#ifdef KR_headers #ifdef KR_headers
(d, mode, ndigits, decpt, sign, rve) (dd, mode, ndigits, decpt, sign, rve)
double d; int mode, ndigits, *decpt, *sign; char **rve; double dd; int mode, ndigits, *decpt, *sign; char **rve;
#else #else
(double d, int mode, int ndigits, int *decpt, int *sign, char **rve) (double dd, int mode, int ndigits, int *decpt, int *sign, char **rve)
#endif #endif
{ {
/* Arguments ndigits, decpt, sign are similar to those /* Arguments ndigits, decpt, sign are similar to those
@ -2687,7 +2681,8 @@ dtoa
ULong x; ULong x;
#endif #endif
Bigint *b, *b1, *delta, *mlo, *mhi, *S; Bigint *b, *b1, *delta, *mlo, *mhi, *S;
double d2, ds, eps; double ds;
U d2, eps;
char *s, *s0; char *s, *s0;
#ifdef Honor_FLT_ROUNDS #ifdef Honor_FLT_ROUNDS
int rounding; int rounding;
@ -2695,6 +2690,8 @@ dtoa
#ifdef SET_INEXACT #ifdef SET_INEXACT
int inexact, oldinexact; int inexact, oldinexact;
#endif #endif
U d;
dval(d) = dd;
/* In mode 2 and 3 we bias rounding up when there are ties. */ /* In mode 2 and 3 we bias rounding up when there are ties. */
bias_round_up = mode == 2 || mode == 3; bias_round_up = mode == 2 || mode == 3;

12
deps/v8/src/top.cc

@ -107,16 +107,15 @@ void Top::IterateThread(ThreadVisitor* v, char* t) {
void Top::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) { void Top::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
v->VisitPointer(&(thread->pending_exception_)); v->VisitPointer(&(thread->pending_exception_));
v->VisitPointer(&(thread->pending_message_obj_)); v->VisitPointer(&(thread->pending_message_obj_));
v->VisitPointer( v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
BitCast<Object**, Script**>(&(thread->pending_message_script_))); v->VisitPointer(BitCast<Object**>(&(thread->context_)));
v->VisitPointer(BitCast<Object**, Context**>(&(thread->context_)));
v->VisitPointer(&(thread->scheduled_exception_)); v->VisitPointer(&(thread->scheduled_exception_));
for (v8::TryCatch* block = thread->TryCatchHandler(); for (v8::TryCatch* block = thread->TryCatchHandler();
block != NULL; block != NULL;
block = TRY_CATCH_FROM_ADDRESS(block->next_)) { block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
v->VisitPointer(BitCast<Object**, void**>(&(block->exception_))); v->VisitPointer(BitCast<Object**>(&(block->exception_)));
v->VisitPointer(BitCast<Object**, void**>(&(block->message_))); v->VisitPointer(BitCast<Object**>(&(block->message_)));
} }
// Iterate over pointers on native execution stack. // Iterate over pointers on native execution stack.
@ -521,7 +520,6 @@ void Top::PrintStack(StringStream* accumulator) {
void Top::SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback) { void Top::SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback) {
ASSERT(thread_local_.failed_access_check_callback_ == NULL);
thread_local_.failed_access_check_callback_ = callback; thread_local_.failed_access_check_callback_ = callback;
} }
@ -531,8 +529,6 @@ void Top::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
ASSERT(receiver->IsAccessCheckNeeded()); ASSERT(receiver->IsAccessCheckNeeded());
ASSERT(Top::context()); ASSERT(Top::context());
// The callers of this method are not expecting a GC.
AssertNoAllocation no_gc;
// Get the data object from access check info. // Get the data object from access check info.
JSFunction* constructor = JSFunction::cast(receiver->map()->constructor()); JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());

16
deps/v8/src/type-info.h

@ -54,7 +54,7 @@ class TypeInfo {
static inline TypeInfo Primitive(); static inline TypeInfo Primitive();
// We know it's a number of some sort. // We know it's a number of some sort.
static inline TypeInfo Number(); static inline TypeInfo Number();
// We know it's signed or unsigned 32 bit integer. // We know it's signed 32 bit integer.
static inline TypeInfo Integer32(); static inline TypeInfo Integer32();
// We know it's a Smi. // We know it's a Smi.
static inline TypeInfo Smi(); static inline TypeInfo Smi();
@ -113,19 +113,15 @@ class TypeInfo {
} }
// Integer32 is an integer that can be represented as either a signed // Integer32 is an integer that can be represented as a signed
// 32-bit integer or as an unsigned 32-bit integer. It has to be // 32-bit integer. It has to be in the range [-2^31, 2^31 - 1].
// in the range [-2^31, 2^32 - 1]. We also have to check for negative 0 // We also have to check for negative 0 as it is not an Integer32.
// as it is not an Integer32.
static inline bool IsInt32Double(double value) { static inline bool IsInt32Double(double value) {
const DoubleRepresentation minus_zero(-0.0); const DoubleRepresentation minus_zero(-0.0);
DoubleRepresentation rep(value); DoubleRepresentation rep(value);
if (rep.bits == minus_zero.bits) return false; if (rep.bits == minus_zero.bits) return false;
if (value >= kMinInt && value <= kMaxUInt32) { if (value >= kMinInt && value <= kMaxInt) {
if (value <= kMaxInt && value == static_cast<int32_t>(value)) { if (value == static_cast<int32_t>(value)) return true;
return true;
}
if (value == static_cast<uint32_t>(value)) return true;
} }
return false; return false;
} }

6
deps/v8/src/utils.h

@ -739,7 +739,11 @@ inline Dest BitCast(const Source& source) {
return dest; return dest;
} }
} } // namespace v8::internal template <class Dest, class Source>
inline Dest BitCast(Source* const & source) {
return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
}
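// Usage note (illustration only): with this pointer overload the source type
// no longer has to be spelled out, as in the top.cc hunk earlier in this diff:
//   v->VisitPointer(BitCast<Object**>(&(thread->context_)));
// instead of the old two-argument form BitCast<Object**, Context**>(...).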
} } // namespace v8::internal
#endif // V8_UTILS_H_ #endif // V8_UTILS_H_

Some files were not shown because too many files changed in this diff
